problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
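The records below follow this schema: each row pairs a repository issue plus the buggy source files (`prompt`) with the reference fix (`golden_diff`) and a JSON `verification_info` blob that repeats the issue, the before/after files, and the diff. As a rough sketch of how such a dump is typically consumed with the `datasets` library (the repository id and split are assumptions; the id simply reuses the `source` column value and is not confirmed by this page):

```python
from datasets import load_dataset

# Assumed repo id and split: the `source` column value is reused here as a guess.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
# The model-facing text is in `prompt`; the reference patch is the `golden_diff` string.
```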
gh_patches_debug_32030
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-1980
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Polish docs for 0.9 release
Open issues that we would like docs included (@minrk Please add to this list)
- [x] Merge and polish PR #1967 (Configuring user environments)
Additional 'to do' items
- [x] Update changelog
- [x] Update contributors list
Not strictly documentation
- [x] Update metadata in setup.py for pypi.org's additional display of docs now
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding: utf-8
3
4 # Copyright (c) Juptyer Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 #-----------------------------------------------------------------------------
8 # Minimal Python version sanity check (from IPython)
9 #-----------------------------------------------------------------------------
10
11 from __future__ import print_function
12
13 import os
14 import shutil
15 import sys
16
17 v = sys.version_info
18 if v[:2] < (3, 5):
19 error = "ERROR: JupyterHub requires Python version 3.5 or above."
20 print(error, file=sys.stderr)
21 sys.exit(1)
22
23 shell = False
24 if os.name in ('nt', 'dos'):
25 shell = True
26 warning = "WARNING: Windows is not officially supported"
27 print(warning, file=sys.stderr)
28
29 # At least we're on the python version we need, move on.
30
31 import os
32 from glob import glob
33 from subprocess import check_call
34
35 from setuptools import setup
36 from setuptools.command.bdist_egg import bdist_egg
37
38 pjoin = os.path.join
39
40 here = os.path.abspath(os.path.dirname(__file__))
41 share_jupyterhub = pjoin(here, 'share', 'jupyterhub')
42 static = pjoin(share_jupyterhub, 'static')
43
44 is_repo = os.path.exists(pjoin(here, '.git'))
45
46 #---------------------------------------------------------------------------
47 # Build basic package data, etc.
48 #---------------------------------------------------------------------------
49
50 def get_data_files():
51 """Get data files in share/jupyter"""
52
53 data_files = []
54 ntrim = len(here + os.path.sep)
55
56 for (d, dirs, filenames) in os.walk(share_jupyterhub):
57 data_files.append((
58 d[ntrim:],
59 [ pjoin(d, f) for f in filenames ]
60 ))
61 return data_files
62
63 def get_package_data():
64 """Get package data
65
66 (mostly alembic config)
67 """
68 package_data = {}
69 package_data['jupyterhub'] = [
70 'alembic.ini',
71 'alembic/*',
72 'alembic/versions/*',
73 ]
74 return package_data
75
76 ns = {}
77 with open(pjoin(here, 'jupyterhub', '_version.py')) as f:
78 exec(f.read(), {}, ns)
79
80
81 packages = []
82 for d, _, _ in os.walk('jupyterhub'):
83 if os.path.exists(pjoin(d, '__init__.py')):
84 packages.append(d.replace(os.path.sep, '.'))
85
86 setup_args = dict(
87 name = 'jupyterhub',
88 scripts = glob(pjoin('scripts', '*')),
89 packages = packages,
90 # dummy, so that install_data doesn't get skipped
91 # this will be overridden when bower is run anyway
92 data_files = get_data_files() or ['dummy'],
93 package_data = get_package_data(),
94 version = ns['__version__'],
95 description = "JupyterHub: A multi-user server for Jupyter notebooks",
96 long_description = "See https://jupyterhub.readthedocs.io for more info.",
97 author = "Jupyter Development Team",
98 author_email = "[email protected]",
99 url = "http://jupyter.org",
100 license = "BSD",
101 platforms = "Linux, Mac OS X",
102 keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
103 python_requires = ">=3.5",
104 classifiers = [
105 'Intended Audience :: Developers',
106 'Intended Audience :: System Administrators',
107 'Intended Audience :: Science/Research',
108 'License :: OSI Approved :: BSD License',
109 'Programming Language :: Python',
110 'Programming Language :: Python :: 3',
111 ],
112 )
113
114 #---------------------------------------------------------------------------
115 # custom distutils commands
116 #---------------------------------------------------------------------------
117
118 # imports here, so they are after setuptools import if there was one
119 from distutils.cmd import Command
120 from distutils.command.build_py import build_py
121 from distutils.command.sdist import sdist
122
123
124 def mtime(path):
125 """shorthand for mtime"""
126 return os.stat(path).st_mtime
127
128
129 class BaseCommand(Command):
130 """Dumb empty command because Command needs subclasses to override too much"""
131 user_options = []
132
133 def initialize_options(self):
134 pass
135
136 def finalize_options(self):
137 pass
138
139 def get_inputs(self):
140 return []
141
142 def get_outputs(self):
143 return []
144
145
146 class NPM(BaseCommand):
147 description = "fetch static client-side components with bower"
148
149 user_options = []
150 node_modules = pjoin(here, 'node_modules')
151 bower_dir = pjoin(static, 'components')
152
153 def should_run(self):
154 if not shutil.which('npm'):
155 print("npm unavailable", file=sys.stderr)
156 return False
157 if not os.path.exists(self.bower_dir):
158 return True
159 if not os.path.exists(self.node_modules):
160 return True
161 if mtime(self.bower_dir) < mtime(self.node_modules):
162 return True
163 return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))
164
165 def run(self):
166 if not self.should_run():
167 print("npm dependencies up to date")
168 return
169
170 print("installing js dependencies with npm")
171 check_call(['npm', 'install', '--progress=false', '--unsafe-perm'], cwd=here, shell=shell)
172 os.utime(self.node_modules)
173
174 os.utime(self.bower_dir)
175 # update data-files in case this created new files
176 self.distribution.data_files = get_data_files()
177
178
179 class CSS(BaseCommand):
180 description = "compile CSS from LESS"
181
182 def should_run(self):
183 """Does less need to run?"""
184 # from IPython.html.tasks.py
185
186 css_targets = [pjoin(static, 'css', 'style.min.css')]
187 css_maps = [t + '.map' for t in css_targets]
188 targets = css_targets + css_maps
189 if not all(os.path.exists(t) for t in targets):
190 # some generated files don't exist
191 return True
192 earliest_target = sorted(mtime(t) for t in targets)[0]
193
194 # check if any .less files are newer than the generated targets
195 for (dirpath, dirnames, filenames) in os.walk(static):
196 for f in filenames:
197 if f.endswith('.less'):
198 path = pjoin(static, dirpath, f)
199 timestamp = mtime(path)
200 if timestamp > earliest_target:
201 return True
202
203 return False
204
205 def run(self):
206 if not self.should_run():
207 print("CSS up-to-date")
208 return
209
210 self.run_command('js')
211 print("Building css with less")
212
213 style_less = pjoin(static, 'less', 'style.less')
214 style_css = pjoin(static, 'css', 'style.min.css')
215 sourcemap = style_css + '.map'
216
217 args = [
218 'npm', 'run', 'lessc', '--', '--clean-css',
219 '--source-map-basepath={}'.format(static),
220 '--source-map={}'.format(sourcemap),
221 '--source-map-rootpath=../',
222 style_less, style_css,
223 ]
224 try:
225 check_call(args, cwd=here, shell=shell)
226 except OSError as e:
227 print("Failed to run lessc: %s" % e, file=sys.stderr)
228 print("You can install js dependencies with `npm install`", file=sys.stderr)
229 raise
230 # update data-files in case this created new files
231 self.distribution.data_files = get_data_files()
232
233
234 def js_css_first(cls, strict=True):
235 class Command(cls):
236 def run(self):
237 try:
238 self.run_command('js')
239 self.run_command('css')
240 except Exception:
241 if strict:
242 raise
243 else:
244 pass
245 return super().run()
246 return Command
247
248
249 class bdist_egg_disabled(bdist_egg):
250 """Disabled version of bdist_egg
251
252 Prevents setup.py install from performing setuptools' default easy_install,
253 which it should never ever do.
254 """
255 def run(self):
256 sys.exit("Aborting implicit building of eggs. Use `pip install .` to install from source.")
257
258
259 setup_args['cmdclass'] = {
260 'js': NPM,
261 'css': CSS,
262 'build_py': js_css_first(build_py, strict=is_repo),
263 'sdist': js_css_first(sdist, strict=True),
264 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,
265 }
266
267
268 # setuptools requirements
269
270 setup_args['zip_safe'] = False
271 from setuptools.command.develop import develop
272 class develop_js_css(develop):
273 def run(self):
274 if not self.uninstall:
275 self.distribution.run_command('js')
276 self.distribution.run_command('css')
277 develop.run(self)
278 setup_args['cmdclass']['develop'] = develop_js_css
279 setup_args['install_requires'] = install_requires = []
280
281 with open('requirements.txt') as f:
282 for line in f.readlines():
283 req = line.strip()
284 if not req or req.startswith('#') or '://' in req:
285 continue
286 install_requires.append(req)
287
288 #---------------------------------------------------------------------------
289 # setup
290 #---------------------------------------------------------------------------
291
292 def main():
293 setup(**setup_args)
294
295 if __name__ == '__main__':
296 main()
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,6 +83,10 @@
if os.path.exists(pjoin(d, '__init__.py')):
packages.append(d.replace(os.path.sep, '.'))
+with open('README.md', encoding="utf8") as f:
+ readme = f.read()
+
+
setup_args = dict(
name = 'jupyterhub',
scripts = glob(pjoin('scripts', '*')),
@@ -93,10 +97,11 @@
package_data = get_package_data(),
version = ns['__version__'],
description = "JupyterHub: A multi-user server for Jupyter notebooks",
- long_description = "See https://jupyterhub.readthedocs.io for more info.",
+ long_description = readme,
+ long_description_content_type = 'text/markdown',
author = "Jupyter Development Team",
author_email = "[email protected]",
- url = "http://jupyter.org",
+ url = "https://jupyter.org",
license = "BSD",
platforms = "Linux, Mac OS X",
keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
@@ -109,6 +114,12 @@
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
+ project_urls = {
+ 'Documentation': 'https://jupyterhub.readthedocs.io',
+ 'Funding': 'https://jupyter.org/about',
+ 'Source': 'https://github.com/jupyterhub/jupyterhub/',
+ 'Tracker': 'https://github.com/jupyterhub/jupyterhub/issues',
+ },
)
#---------------------------------------------------------------------------
|
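For context on the golden diff above: `long_description_content_type` and `project_urls` are standard setuptools/PyPI metadata fields; the first tells pypi.org to render the README as Markdown, the second adds the sidebar links mentioned in the issue. A minimal, self-contained sketch (an illustrative package, not JupyterHub's actual setup.py):

```python
from setuptools import setup

setup(
    name="example-package",          # illustrative name, not a real project
    version="0.1.0",
    long_description=open("README.md", encoding="utf8").read(),
    long_description_content_type="text/markdown",  # render Markdown on pypi.org
    project_urls={
        "Documentation": "https://example.readthedocs.io",
        "Source": "https://github.com/example/example",
    },
)
```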
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -83,6 +83,10 @@\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n \n+with open('README.md', encoding=\"utf8\") as f:\n+ readme = f.read()\n+\n+\n setup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n@@ -93,10 +97,11 @@\n package_data = get_package_data(),\n version = ns['__version__'],\n description = \"JupyterHub: A multi-user server for Jupyter notebooks\",\n- long_description = \"See https://jupyterhub.readthedocs.io for more info.\",\n+ long_description = readme,\n+ long_description_content_type = 'text/markdown',\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n- url = \"http://jupyter.org\",\n+ url = \"https://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n@@ -109,6 +114,12 @@\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n+ project_urls = {\n+ 'Documentation': 'https://jupyterhub.readthedocs.io',\n+ 'Funding': 'https://jupyter.org/about',\n+ 'Source': 'https://github.com/jupyterhub/jupyterhub/',\n+ 'Tracker': 'https://github.com/jupyterhub/jupyterhub/issues',\n+ },\n )\n \n #---------------------------------------------------------------------------\n", "issue": "Polish docs for 0.9 release\nOpen issues that we would like docs included (@minrk Please add to this list)\r\n- [x] Merge and polish PR #1967 (Configuring user environments)\r\n\r\nAdditional 'to do' items\r\n- [x] Update changelog\r\n- [x] Update contributors list\r\n\r\nNot strictly documentation\r\n- [x] Update metadata in setup.py for pypi.org's additional display of docs now\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\nv = sys.version_info\nif v[:2] < (3, 5):\n error = \"ERROR: JupyterHub requires Python version 3.5 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nshell = False\nif os.name in ('nt', 'dos'):\n shell = True\n warning = \"WARNING: Windows is not officially supported\"\n print(warning, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\nfrom glob import glob\nfrom subprocess import check_call\n\nfrom setuptools import setup\nfrom setuptools.command.bdist_egg import bdist_egg\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyterhub = pjoin(here, 'share', 'jupyterhub')\nstatic = pjoin(share_jupyterhub, 'static')\n\nis_repo = os.path.exists(pjoin(here, '.git'))\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n\n data_files = []\n ntrim = len(here + os.path.sep)\n\n for (d, dirs, filenames) in os.walk(share_jupyterhub):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\ndef get_package_data():\n \"\"\"Get package data\n\n 
(mostly alembic config)\n \"\"\"\n package_data = {}\n package_data['jupyterhub'] = [\n 'alembic.ini',\n 'alembic/*',\n 'alembic/versions/*',\n ]\n return package_data\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', '_version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n package_data = get_package_data(),\n version = ns['__version__'],\n description = \"JupyterHub: A multi-user server for Jupyter notebooks\",\n long_description = \"See https://jupyterhub.readthedocs.io for more info.\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n python_requires = \">=3.5\",\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\n\ndef mtime(path):\n \"\"\"shorthand for mtime\"\"\"\n return os.stat(path).st_mtime\n\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def get_inputs(self):\n return []\n\n def get_outputs(self):\n return []\n\n\nclass NPM(BaseCommand):\n description = \"fetch static client-side components with bower\"\n\n user_options = []\n node_modules = pjoin(here, 'node_modules')\n bower_dir = pjoin(static, 'components')\n\n def should_run(self):\n if not shutil.which('npm'):\n print(\"npm unavailable\", file=sys.stderr)\n return False\n if not os.path.exists(self.bower_dir):\n return True\n if not os.path.exists(self.node_modules):\n return True\n if mtime(self.bower_dir) < mtime(self.node_modules):\n return True\n return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))\n\n def run(self):\n if not self.should_run():\n print(\"npm dependencies up to date\")\n return\n\n print(\"installing js dependencies with npm\")\n check_call(['npm', 'install', '--progress=false', '--unsafe-perm'], cwd=here, shell=shell)\n os.utime(self.node_modules)\n\n os.utime(self.bower_dir)\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n\n def should_run(self):\n \"\"\"Does less need to run?\"\"\"\n # from IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated 
files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for (dirpath, dirnames, filenames) in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False\n\n def run(self):\n if not self.should_run():\n print(\"CSS up-to-date\")\n return\n\n self.run_command('js')\n print(\"Building css with less\")\n\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n\n args = [\n 'npm', 'run', 'lessc', '--', '--clean-css',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ]\n try:\n check_call(args, cwd=here, shell=shell)\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\ndef js_css_first(cls, strict=True):\n class Command(cls):\n def run(self):\n try:\n self.run_command('js')\n self.run_command('css')\n except Exception:\n if strict:\n raise\n else:\n pass\n return super().run()\n return Command\n\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n\n Prevents setup.py install from performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. Use `pip install .` to install from source.\")\n\n\nsetup_args['cmdclass'] = {\n 'js': NPM,\n 'css': CSS,\n 'build_py': js_css_first(build_py, strict=is_repo),\n 'sdist': js_css_first(sdist, strict=True),\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,\n}\n\n\n# setuptools requirements\n\nsetup_args['zip_safe'] = False\nfrom setuptools.command.develop import develop\nclass develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\nsetup_args['cmdclass']['develop'] = develop_js_css\nsetup_args['install_requires'] = install_requires = []\n\nwith open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\nv = sys.version_info\nif v[:2] < (3, 5):\n error = \"ERROR: JupyterHub requires Python version 3.5 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nshell = False\nif os.name in ('nt', 'dos'):\n shell = True\n warning = \"WARNING: Windows 
is not officially supported\"\n print(warning, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\nfrom glob import glob\nfrom subprocess import check_call\n\nfrom setuptools import setup\nfrom setuptools.command.bdist_egg import bdist_egg\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyterhub = pjoin(here, 'share', 'jupyterhub')\nstatic = pjoin(share_jupyterhub, 'static')\n\nis_repo = os.path.exists(pjoin(here, '.git'))\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n\n data_files = []\n ntrim = len(here + os.path.sep)\n\n for (d, dirs, filenames) in os.walk(share_jupyterhub):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\ndef get_package_data():\n \"\"\"Get package data\n\n (mostly alembic config)\n \"\"\"\n package_data = {}\n package_data['jupyterhub'] = [\n 'alembic.ini',\n 'alembic/*',\n 'alembic/versions/*',\n ]\n return package_data\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', '_version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nwith open('README.md', encoding=\"utf8\") as f:\n readme = f.read()\n\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n package_data = get_package_data(),\n version = ns['__version__'],\n description = \"JupyterHub: A multi-user server for Jupyter notebooks\",\n long_description = readme,\n long_description_content_type = 'text/markdown',\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"https://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n python_requires = \">=3.5\",\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n project_urls = {\n 'Documentation': 'https://jupyterhub.readthedocs.io',\n 'Funding': 'https://jupyter.org/about',\n 'Source': 'https://github.com/jupyterhub/jupyterhub/',\n 'Tracker': 'https://github.com/jupyterhub/jupyterhub/issues',\n },\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\n\ndef mtime(path):\n \"\"\"shorthand for mtime\"\"\"\n return os.stat(path).st_mtime\n\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def get_inputs(self):\n return []\n\n def get_outputs(self):\n return 
[]\n\n\nclass NPM(BaseCommand):\n description = \"fetch static client-side components with bower\"\n\n user_options = []\n node_modules = pjoin(here, 'node_modules')\n bower_dir = pjoin(static, 'components')\n\n def should_run(self):\n if not shutil.which('npm'):\n print(\"npm unavailable\", file=sys.stderr)\n return False\n if not os.path.exists(self.bower_dir):\n return True\n if not os.path.exists(self.node_modules):\n return True\n if mtime(self.bower_dir) < mtime(self.node_modules):\n return True\n return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))\n\n def run(self):\n if not self.should_run():\n print(\"npm dependencies up to date\")\n return\n\n print(\"installing js dependencies with npm\")\n check_call(['npm', 'install', '--progress=false', '--unsafe-perm'], cwd=here, shell=shell)\n os.utime(self.node_modules)\n\n os.utime(self.bower_dir)\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n\n def should_run(self):\n \"\"\"Does less need to run?\"\"\"\n # from IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for (dirpath, dirnames, filenames) in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False\n\n def run(self):\n if not self.should_run():\n print(\"CSS up-to-date\")\n return\n\n self.run_command('js')\n print(\"Building css with less\")\n\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n\n args = [\n 'npm', 'run', 'lessc', '--', '--clean-css',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ]\n try:\n check_call(args, cwd=here, shell=shell)\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\ndef js_css_first(cls, strict=True):\n class Command(cls):\n def run(self):\n try:\n self.run_command('js')\n self.run_command('css')\n except Exception:\n if strict:\n raise\n else:\n pass\n return super().run()\n return Command\n\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n\n Prevents setup.py install from performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. 
Use `pip install .` to install from source.\")\n\n\nsetup_args['cmdclass'] = {\n 'js': NPM,\n 'css': CSS,\n 'build_py': js_css_first(build_py, strict=is_repo),\n 'sdist': js_css_first(sdist, strict=True),\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,\n}\n\n\n# setuptools requirements\n\nsetup_args['zip_safe'] = False\nfrom setuptools.command.develop import develop\nclass develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\nsetup_args['cmdclass']['develop'] = develop_js_css\nsetup_args['install_requires'] = install_requires = []\n\nwith open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]}
| 3,157 | 396 |
gh_patches_debug_35715
|
rasdani/github-patches
|
git_diff
|
wandb__wandb-447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fastai: wandb not logging fastai validation loss with default args
`wandb --version && python --version && uname`
* Weights and Biases version: 0.8.5
* Python version: 3.7.3
* Operating System: Linux
* fast.ai version: 1.0.55
### Description
When you use the `fastai` callback `WandbCallback` with the default arguments you get some errors and the validation loss is not logged.
### What I Did
1. Run the following code in a jupyter notebook
```
import wandb
import fastai
from wandb.fastai import WandbCallback
from fastai.vision import *
from functools import partial
print(f'wandb version: {wandb.__version__}, fastai version: {fastai.__version__}')
wandb.init(project="fastai-test")
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = cnn_learner(data, models.resnet18, metrics=accuracy, callback_fns=WandbCallback)
learn.fit_one_cycle(1, 1e-2)
```
2. You'll get the following error:

```
/home/fastai/anaconda3/envs/wandbtest/lib/python3.7/site-packages/fastai/callbacks/tracker.py:50: UserWarning: <class 'wandb.fastai.WandbCallback'> conditioned on metric `val_loss` which is not available. Available metrics are: train_loss, valid_loss, accuracy
warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {", ".join(map(str, self.learn.recorder.names[1:-1]))}')
```
3. To fix it you can tell it to monitor `valid_loss` by changing the callback to be:
``` python
learn = cnn_learner(data, models.resnet18, metrics=accuracy, callback_fns=partial(WandbCallback, monitor='valid_loss'))
```
I'm guessing that the name of the loss changed with fast.ai at some point.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wandb/fastai/__init__.py`
Content:
```
1 '''W&B Callback for fast.ai
2
3 This module hooks fast.ai Learners to Weights & Biases through a callback.
4 Requested logged data can be configured through the callback constructor.
5
6 Examples:
7 WandbCallback can be used when initializing the Learner::
8
9 from wandb.fastai import WandbCallback
10 [...]
11 learn = Learner(data, ..., callback_fns=WandbCallback)
12 learn.fit(epochs)
13
14 Custom parameters can be given using functools.partial::
15
16 from wandb.fastai import WandbCallback
17 from functools import partial
18 [...]
19 learn = Learner(data, ..., callback_fns=partial(WandbCallback, ...))
20 learn.fit(epochs)
21
22 Finally, it is possible to use WandbCallback only when starting
23 training. In this case it must be instantiated::
24
25 learn.fit(..., callbacks=WandbCallback())
26
27 or, with custom parameters::
28
29 learn.fit(..., callbacks=WandBCallback(learn, ...))
30 '''
31 import wandb
32 from fastai.callbacks import TrackerCallback
33 from pathlib import Path
34 import random
35 try:
36 import matplotlib
37 matplotlib.use('Agg') # non-interactive backend (avoid tkinter issues)
38 import matplotlib.pyplot as plt
39 except:
40 print('Warning: matplotlib required if logging sample image predictions')
41
42
43 class WandbCallback(TrackerCallback):
44
45 # Record if watch has been called previously (even in another instance)
46 watch_called = False
47
48 def __init__(self,
49 learn,
50 log="gradients",
51 save_model=True,
52 monitor='val_loss',
53 mode='auto',
54 input_type=None,
55 validation_data=None,
56 predictions=36):
57 """WandB fast.ai Callback
58
59 Automatically saves model topology, losses & metrics.
60 Optionally logs weights, gradients, sample predictions and best trained model.
61
62 Args:
63 learn (fastai.basic_train.Learner): the fast.ai learner to hook.
64 log (str): "gradients", "parameters", "all", or None. Losses & metrics are always logged.
65 save_model (bool): save model at the end of each epoch.
66 monitor (str): metric to monitor for saving best model.
67 mode (str): "auto", "min" or "max" to compare "monitor" values and define best model.
68 input_type (str): "images" or None. Used to display sample predictions.
69 validation_data (list): data used for sample predictions if input_type is set.
70 predictions (int): number of predictions to make if input_type is set and validation_data is None.
71 """
72
73 # Check if wandb.init has been called
74 if wandb.run is None:
75 raise ValueError(
76 'You must call wandb.init() before WandbCallback()')
77
78 # Adapted from fast.ai "SaveModelCallback"
79 super().__init__(learn, monitor=monitor, mode=mode)
80 self.save_model = save_model
81 self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'
82
83 self.log = log
84 self.input_type = input_type
85 self.best = None
86
87 # Select items for sample predictions to see evolution along training
88 self.validation_data = validation_data
89 if input_type and not self.validation_data:
90 predictions = min(predictions, len(learn.data.valid_ds))
91 indices = random.sample(range(len(learn.data.valid_ds)),
92 predictions)
93 self.validation_data = [learn.data.valid_ds[i] for i in indices]
94
95 def on_train_begin(self, **kwargs):
96 "Call watch method to log model topology, gradients & weights"
97
98 # Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback"
99 super().on_train_begin()
100
101 # Ensure we don't call "watch" multiple times
102 if not WandbCallback.watch_called:
103 WandbCallback.watch_called = True
104
105 # Logs model topology and optionally gradients and weights
106 wandb.watch(self.learn.model, log=self.log)
107
108 def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
109 "Logs training loss, validation loss and custom metrics & log prediction samples & save model"
110
111 if self.save_model:
112 # Adapted from fast.ai "SaveModelCallback"
113 current = self.get_monitor_value()
114 if current is not None and self.operator(current, self.best):
115 print(
116 'Better model found at epoch {} with {} value: {}.'.format(
117 epoch, self.monitor, current))
118 self.best = current
119
120 # Save within wandb folder
121 with self.model_path.open('wb') as model_file:
122 self.learn.save(model_file)
123
124 # Log sample predictions
125 if self.validation_data:
126 pred_log = []
127
128 for x, y in self.validation_data:
129 pred = self.learn.predict(x)
130
131 # scalar -> likely to be a category
132 if not pred[1].shape:
133 pred_log.append(
134 wandb.Image(
135 x.data,
136 caption='Ground Truth: {}\nPrediction: {}'.format(
137 y, pred[0])))
138
139 # most vision datasets have a "show" function we can use
140 elif hasattr(x, "show"):
141 # log input data
142 pred_log.append(
143 wandb.Image(x.data, caption='Input data', grouping=3))
144
145 # log label and prediction
146 for im, capt in (y, "Ground Truth"), (pred[0],
147 "Prediction"):
148 # Resize plot to image resolution
149 # from https://stackoverflow.com/a/13714915
150 my_dpi = 100
151 fig = plt.figure(frameon=False, dpi=my_dpi)
152 h, w = x.size
153 fig.set_size_inches(w / my_dpi, h / my_dpi)
154 ax = plt.Axes(fig, [0., 0., 1., 1.])
155 ax.set_axis_off()
156 fig.add_axes(ax)
157
158 # Superpose label or prediction to input image
159 x.show(ax=ax, y=im)
160 pred_log.append(wandb.Image(fig, caption=capt))
161 plt.close(fig)
162
163 # likely to be an image
164 elif hasattr(y, "shape") and (
165 (len(y.shape) == 2) or
166 (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):
167
168 pred_log.extend([
169 wandb.Image(x.data, caption='Input data', grouping=3),
170 wandb.Image(y.data, caption='Ground Truth'),
171 wandb.Image(pred[0].data, caption='Prediction')
172 ])
173
174 # we just log input data
175 else:
176 pred_log.append(wandb.Image(x.data, caption='Input data'))
177
178 wandb.log({"Prediction Samples": pred_log}, commit=False)
179
180 # Log losses & metrics
181 # Adapted from fast.ai "CSVLogger"
182 logs = {
183 name: stat
184 for name, stat in list(
185 zip(self.learn.recorder.names, [epoch, smooth_loss] +
186 last_metrics))[1:]
187 }
188 wandb.log(logs)
189
190 def on_train_end(self, **kwargs):
191 "Load the best model."
192
193 if self.save_model:
194 # Adapted from fast.ai "SaveModelCallback"
195 if self.model_path.is_file():
196 with self.model_path.open('rb') as model_file:
197 self.learn.load(model_file, purge=False)
198 print('Loaded best saved model from {}'.format(
199 self.model_path))
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wandb/fastai/__init__.py b/wandb/fastai/__init__.py
--- a/wandb/fastai/__init__.py
+++ b/wandb/fastai/__init__.py
@@ -29,6 +29,7 @@
learn.fit(..., callbacks=WandBCallback(learn, ...))
'''
import wandb
+import fastai
from fastai.callbacks import TrackerCallback
from pathlib import Path
import random
@@ -49,7 +50,7 @@
learn,
log="gradients",
save_model=True,
- monitor='val_loss',
+ monitor=None,
mode='auto',
input_type=None,
validation_data=None,
@@ -63,7 +64,7 @@
learn (fastai.basic_train.Learner): the fast.ai learner to hook.
log (str): "gradients", "parameters", "all", or None. Losses & metrics are always logged.
save_model (bool): save model at the end of each epoch.
- monitor (str): metric to monitor for saving best model.
+ monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.
mode (str): "auto", "min" or "max" to compare "monitor" values and define best model.
input_type (str): "images" or None. Used to display sample predictions.
validation_data (list): data used for sample predictions if input_type is set.
@@ -76,7 +77,11 @@
'You must call wandb.init() before WandbCallback()')
# Adapted from fast.ai "SaveModelCallback"
- super().__init__(learn, monitor=monitor, mode=mode)
+ if monitor is None:
+ # use default TrackerCallback monitor value
+ super().__init__(learn, mode=mode)
+ else:
+ super().__init__(learn, monitor=monitor, mode=mode)
self.save_model = save_model
self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'
|
{"golden_diff": "diff --git a/wandb/fastai/__init__.py b/wandb/fastai/__init__.py\n--- a/wandb/fastai/__init__.py\n+++ b/wandb/fastai/__init__.py\n@@ -29,6 +29,7 @@\n learn.fit(..., callbacks=WandBCallback(learn, ...))\n '''\n import wandb\n+import fastai\n from fastai.callbacks import TrackerCallback\n from pathlib import Path\n import random\n@@ -49,7 +50,7 @@\n learn,\n log=\"gradients\",\n save_model=True,\n- monitor='val_loss',\n+ monitor=None,\n mode='auto',\n input_type=None,\n validation_data=None,\n@@ -63,7 +64,7 @@\n learn (fastai.basic_train.Learner): the fast.ai learner to hook.\n log (str): \"gradients\", \"parameters\", \"all\", or None. Losses & metrics are always logged.\n save_model (bool): save model at the end of each epoch.\n- monitor (str): metric to monitor for saving best model.\n+ monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.\n mode (str): \"auto\", \"min\" or \"max\" to compare \"monitor\" values and define best model.\n input_type (str): \"images\" or None. Used to display sample predictions.\n validation_data (list): data used for sample predictions if input_type is set.\n@@ -76,7 +77,11 @@\n 'You must call wandb.init() before WandbCallback()')\n \n # Adapted from fast.ai \"SaveModelCallback\"\n- super().__init__(learn, monitor=monitor, mode=mode)\n+ if monitor is None:\n+ # use default TrackerCallback monitor value\n+ super().__init__(learn, mode=mode)\n+ else:\n+ super().__init__(learn, monitor=monitor, mode=mode)\n self.save_model = save_model\n self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'\n", "issue": "fastai: wandb not logging fastai validation loss with default args\n`wandb --version && python --version && uname`\r\n\r\n* Weights and Biases version: 0.8.5\r\n* Python version: 3.7.3\r\n* Operating System: Linux\r\n* fast.ai version: 1.0.55\r\n\r\n### Description\r\n\r\nWhen you use the `fastai` callback `WandbCallback` with the default arguments you get some errors and the validation loss is not logged.\r\n\r\n### What I Did\r\n\r\n1. Run the following code in a jupyter notebook\r\n```\r\nimport wandb\r\nimport fastai\r\nfrom wandb.fastai import WandbCallback\r\nfrom fastai.vision import *\r\nfrom functools import partial\r\nprint(f'wandb version: {wandb.__version__}, fastai version: {fastai.__version__}')\r\nwandb.init(project=\"fastai-test\")\r\npath = untar_data(URLs.MNIST_SAMPLE)\r\ndata = ImageDataBunch.from_folder(path)\r\nlearn = cnn_learner(data, models.resnet18, metrics=accuracy, callback_fns=WandbCallback)\r\nlearn.fit_one_cycle(1, 1e-2)\r\n```\r\n\r\n2. You'll get the following error:\r\n\r\n\r\n\r\n```\r\n/home/fastai/anaconda3/envs/wandbtest/lib/python3.7/site-packages/fastai/callbacks/tracker.py:50: UserWarning: <class 'wandb.fastai.WandbCallback'> conditioned on metric `val_loss` which is not available. Available metrics are: train_loss, valid_loss, accuracy\r\n warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {\", \".join(map(str, self.learn.recorder.names[1:-1]))}')\r\n```\r\n\r\n3. 
To fix it you can tell it to monitor `valid_loss` by changing the callback to be:\r\n\r\n``` python\r\nlearn = cnn_learner(data, models.resnet18, metrics=accuracy, callback_fns=partial(WandbCallback, monitor='valid_loss'))\r\n```\r\n\r\nI'm guessing that the name of the loss changed with fast.ai at some point.\r\n\n", "before_files": [{"content": "'''W&B Callback for fast.ai\n\nThis module hooks fast.ai Learners to Weights & Biases through a callback.\nRequested logged data can be configured through the callback constructor.\n\nExamples:\n WandbCallback can be used when initializing the Learner::\n\n from wandb.fastai import WandbCallback\n [...]\n learn = Learner(data, ..., callback_fns=WandbCallback)\n learn.fit(epochs)\n \n Custom parameters can be given using functools.partial::\n\n from wandb.fastai import WandbCallback\n from functools import partial\n [...]\n learn = Learner(data, ..., callback_fns=partial(WandbCallback, ...))\n learn.fit(epochs)\n\n Finally, it is possible to use WandbCallback only when starting\n training. In this case it must be instantiated::\n\n learn.fit(..., callbacks=WandbCallback())\n\n or, with custom parameters::\n\n learn.fit(..., callbacks=WandBCallback(learn, ...))\n'''\nimport wandb\nfrom fastai.callbacks import TrackerCallback\nfrom pathlib import Path\nimport random\ntry:\n import matplotlib\n matplotlib.use('Agg') # non-interactive backend (avoid tkinter issues)\n import matplotlib.pyplot as plt\nexcept:\n print('Warning: matplotlib required if logging sample image predictions')\n\n\nclass WandbCallback(TrackerCallback):\n\n # Record if watch has been called previously (even in another instance)\n watch_called = False\n\n def __init__(self,\n learn,\n log=\"gradients\",\n save_model=True,\n monitor='val_loss',\n mode='auto',\n input_type=None,\n validation_data=None,\n predictions=36):\n \"\"\"WandB fast.ai Callback\n\n Automatically saves model topology, losses & metrics.\n Optionally logs weights, gradients, sample predictions and best trained model.\n\n Args:\n learn (fastai.basic_train.Learner): the fast.ai learner to hook.\n log (str): \"gradients\", \"parameters\", \"all\", or None. Losses & metrics are always logged.\n save_model (bool): save model at the end of each epoch.\n monitor (str): metric to monitor for saving best model.\n mode (str): \"auto\", \"min\" or \"max\" to compare \"monitor\" values and define best model.\n input_type (str): \"images\" or None. 
Used to display sample predictions.\n validation_data (list): data used for sample predictions if input_type is set.\n predictions (int): number of predictions to make if input_type is set and validation_data is None.\n \"\"\"\n\n # Check if wandb.init has been called\n if wandb.run is None:\n raise ValueError(\n 'You must call wandb.init() before WandbCallback()')\n\n # Adapted from fast.ai \"SaveModelCallback\"\n super().__init__(learn, monitor=monitor, mode=mode)\n self.save_model = save_model\n self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'\n\n self.log = log\n self.input_type = input_type\n self.best = None\n\n # Select items for sample predictions to see evolution along training\n self.validation_data = validation_data\n if input_type and not self.validation_data:\n predictions = min(predictions, len(learn.data.valid_ds))\n indices = random.sample(range(len(learn.data.valid_ds)),\n predictions)\n self.validation_data = [learn.data.valid_ds[i] for i in indices]\n\n def on_train_begin(self, **kwargs):\n \"Call watch method to log model topology, gradients & weights\"\n\n # Set self.best, method inherited from \"TrackerCallback\" by \"SaveModelCallback\"\n super().on_train_begin()\n\n # Ensure we don't call \"watch\" multiple times\n if not WandbCallback.watch_called:\n WandbCallback.watch_called = True\n\n # Logs model topology and optionally gradients and weights\n wandb.watch(self.learn.model, log=self.log)\n\n def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):\n \"Logs training loss, validation loss and custom metrics & log prediction samples & save model\"\n\n if self.save_model:\n # Adapted from fast.ai \"SaveModelCallback\"\n current = self.get_monitor_value()\n if current is not None and self.operator(current, self.best):\n print(\n 'Better model found at epoch {} with {} value: {}.'.format(\n epoch, self.monitor, current))\n self.best = current\n\n # Save within wandb folder\n with self.model_path.open('wb') as model_file:\n self.learn.save(model_file)\n\n # Log sample predictions\n if self.validation_data:\n pred_log = []\n\n for x, y in self.validation_data:\n pred = self.learn.predict(x)\n\n # scalar -> likely to be a category\n if not pred[1].shape:\n pred_log.append(\n wandb.Image(\n x.data,\n caption='Ground Truth: {}\\nPrediction: {}'.format(\n y, pred[0])))\n\n # most vision datasets have a \"show\" function we can use\n elif hasattr(x, \"show\"):\n # log input data\n pred_log.append(\n wandb.Image(x.data, caption='Input data', grouping=3))\n\n # log label and prediction\n for im, capt in (y, \"Ground Truth\"), (pred[0],\n \"Prediction\"):\n # Resize plot to image resolution\n # from https://stackoverflow.com/a/13714915\n my_dpi = 100\n fig = plt.figure(frameon=False, dpi=my_dpi)\n h, w = x.size\n fig.set_size_inches(w / my_dpi, h / my_dpi)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n # Superpose label or prediction to input image\n x.show(ax=ax, y=im)\n pred_log.append(wandb.Image(fig, caption=capt))\n plt.close(fig)\n\n # likely to be an image\n elif hasattr(y, \"shape\") and (\n (len(y.shape) == 2) or\n (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):\n\n pred_log.extend([\n wandb.Image(x.data, caption='Input data', grouping=3),\n wandb.Image(y.data, caption='Ground Truth'),\n wandb.Image(pred[0].data, caption='Prediction')\n ])\n\n # we just log input data\n else:\n pred_log.append(wandb.Image(x.data, caption='Input data'))\n\n wandb.log({\"Prediction Samples\": pred_log}, commit=False)\n\n # Log losses 
& metrics\n # Adapted from fast.ai \"CSVLogger\"\n logs = {\n name: stat\n for name, stat in list(\n zip(self.learn.recorder.names, [epoch, smooth_loss] +\n last_metrics))[1:]\n }\n wandb.log(logs)\n\n def on_train_end(self, **kwargs):\n \"Load the best model.\"\n\n if self.save_model:\n # Adapted from fast.ai \"SaveModelCallback\"\n if self.model_path.is_file():\n with self.model_path.open('rb') as model_file:\n self.learn.load(model_file, purge=False)\n print('Loaded best saved model from {}'.format(\n self.model_path))\n", "path": "wandb/fastai/__init__.py"}], "after_files": [{"content": "'''W&B Callback for fast.ai\n\nThis module hooks fast.ai Learners to Weights & Biases through a callback.\nRequested logged data can be configured through the callback constructor.\n\nExamples:\n WandbCallback can be used when initializing the Learner::\n\n from wandb.fastai import WandbCallback\n [...]\n learn = Learner(data, ..., callback_fns=WandbCallback)\n learn.fit(epochs)\n \n Custom parameters can be given using functools.partial::\n\n from wandb.fastai import WandbCallback\n from functools import partial\n [...]\n learn = Learner(data, ..., callback_fns=partial(WandbCallback, ...))\n learn.fit(epochs)\n\n Finally, it is possible to use WandbCallback only when starting\n training. In this case it must be instantiated::\n\n learn.fit(..., callbacks=WandbCallback())\n\n or, with custom parameters::\n\n learn.fit(..., callbacks=WandBCallback(learn, ...))\n'''\nimport wandb\nimport fastai\nfrom fastai.callbacks import TrackerCallback\nfrom pathlib import Path\nimport random\ntry:\n import matplotlib\n matplotlib.use('Agg') # non-interactive backend (avoid tkinter issues)\n import matplotlib.pyplot as plt\nexcept:\n print('Warning: matplotlib required if logging sample image predictions')\n\n\nclass WandbCallback(TrackerCallback):\n\n # Record if watch has been called previously (even in another instance)\n watch_called = False\n\n def __init__(self,\n learn,\n log=\"gradients\",\n save_model=True,\n monitor=None,\n mode='auto',\n input_type=None,\n validation_data=None,\n predictions=36):\n \"\"\"WandB fast.ai Callback\n\n Automatically saves model topology, losses & metrics.\n Optionally logs weights, gradients, sample predictions and best trained model.\n\n Args:\n learn (fastai.basic_train.Learner): the fast.ai learner to hook.\n log (str): \"gradients\", \"parameters\", \"all\", or None. Losses & metrics are always logged.\n save_model (bool): save model at the end of each epoch.\n monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.\n mode (str): \"auto\", \"min\" or \"max\" to compare \"monitor\" values and define best model.\n input_type (str): \"images\" or None. 
Used to display sample predictions.\n validation_data (list): data used for sample predictions if input_type is set.\n predictions (int): number of predictions to make if input_type is set and validation_data is None.\n \"\"\"\n\n # Check if wandb.init has been called\n if wandb.run is None:\n raise ValueError(\n 'You must call wandb.init() before WandbCallback()')\n\n # Adapted from fast.ai \"SaveModelCallback\"\n if monitor is None:\n # use default TrackerCallback monitor value\n super().__init__(learn, mode=mode)\n else:\n super().__init__(learn, monitor=monitor, mode=mode)\n self.save_model = save_model\n self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'\n\n self.log = log\n self.input_type = input_type\n self.best = None\n\n # Select items for sample predictions to see evolution along training\n self.validation_data = validation_data\n if input_type and not self.validation_data:\n predictions = min(predictions, len(learn.data.valid_ds))\n indices = random.sample(range(len(learn.data.valid_ds)),\n predictions)\n self.validation_data = [learn.data.valid_ds[i] for i in indices]\n\n def on_train_begin(self, **kwargs):\n \"Call watch method to log model topology, gradients & weights\"\n\n # Set self.best, method inherited from \"TrackerCallback\" by \"SaveModelCallback\"\n super().on_train_begin()\n\n # Ensure we don't call \"watch\" multiple times\n if not WandbCallback.watch_called:\n WandbCallback.watch_called = True\n\n # Logs model topology and optionally gradients and weights\n wandb.watch(self.learn.model, log=self.log)\n\n def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):\n \"Logs training loss, validation loss and custom metrics & log prediction samples & save model\"\n\n if self.save_model:\n # Adapted from fast.ai \"SaveModelCallback\"\n current = self.get_monitor_value()\n if current is not None and self.operator(current, self.best):\n print(\n 'Better model found at epoch {} with {} value: {}.'.format(\n epoch, self.monitor, current))\n self.best = current\n\n # Save within wandb folder\n with self.model_path.open('wb') as model_file:\n self.learn.save(model_file)\n\n # Log sample predictions\n if self.validation_data:\n pred_log = []\n\n for x, y in self.validation_data:\n pred = self.learn.predict(x)\n\n # scalar -> likely to be a category\n if not pred[1].shape:\n pred_log.append(\n wandb.Image(\n x.data,\n caption='Ground Truth: {}\\nPrediction: {}'.format(\n y, pred[0])))\n\n # most vision datasets have a \"show\" function we can use\n elif hasattr(x, \"show\"):\n # log input data\n pred_log.append(\n wandb.Image(x.data, caption='Input data', grouping=3))\n\n # log label and prediction\n for im, capt in (y, \"Ground Truth\"), (pred[0],\n \"Prediction\"):\n # Resize plot to image resolution\n # from https://stackoverflow.com/a/13714915\n my_dpi = 100\n fig = plt.figure(frameon=False, dpi=my_dpi)\n h, w = x.size\n fig.set_size_inches(w / my_dpi, h / my_dpi)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n # Superpose label or prediction to input image\n x.show(ax=ax, y=im)\n pred_log.append(wandb.Image(fig, caption=capt))\n plt.close(fig)\n\n # likely to be an image\n elif hasattr(y, \"shape\") and (\n (len(y.shape) == 2) or\n (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):\n\n pred_log.extend([\n wandb.Image(x.data, caption='Input data', grouping=3),\n wandb.Image(y.data, caption='Ground Truth'),\n wandb.Image(pred[0].data, caption='Prediction')\n ])\n\n # we just log input data\n else:\n 
pred_log.append(wandb.Image(x.data, caption='Input data'))\n\n wandb.log({\"Prediction Samples\": pred_log}, commit=False)\n\n # Log losses & metrics\n # Adapted from fast.ai \"CSVLogger\"\n logs = {\n name: stat\n for name, stat in list(\n zip(self.learn.recorder.names, [epoch, smooth_loss] +\n last_metrics))[1:]\n }\n wandb.log(logs)\n\n def on_train_end(self, **kwargs):\n \"Load the best model.\"\n\n if self.save_model:\n # Adapted from fast.ai \"SaveModelCallback\"\n if self.model_path.is_file():\n with self.model_path.open('rb') as model_file:\n self.learn.load(model_file, purge=False)\n print('Loaded best saved model from {}'.format(\n self.model_path))\n", "path": "wandb/fastai/__init__.py"}]}
| 2,881 | 454 |
gh_patches_debug_36598
|
rasdani/github-patches
|
git_diff
|
getredash__redash-1944
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redash Permissions not working for some use cases
### Issue Summary
Currently, when a query owner grants permission to another user for a query, the user is still unable to perform the following tasks:
* change data source
* schedule the query
* add and save new visualisation
I believe the user should have the ability to do all the things that the owner could do once permission has been granted.
### Technical details:
* Redash Version: 1.0.3
* Browser/OS: Chrome
* How did you install Redash: AWS using the AMI
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/visualizations.py`
Content:
```
1 import json
2 from flask import request
3
4 from redash import models
5 from redash.permissions import require_permission, require_admin_or_owner
6 from redash.handlers.base import BaseResource, get_object_or_404
7
8
9 class VisualizationListResource(BaseResource):
10 @require_permission('edit_query')
11 def post(self):
12 kwargs = request.get_json(force=True)
13
14 query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop('query_id'), self.current_org)
15 require_admin_or_owner(query.user_id)
16
17 kwargs['options'] = json.dumps(kwargs['options'])
18 kwargs['query_rel'] = query
19
20 vis = models.Visualization(**kwargs)
21 models.db.session.add(vis)
22 models.db.session.commit()
23 d = vis.to_dict(with_query=False)
24 return d
25
26
27 class VisualizationResource(BaseResource):
28 @require_permission('edit_query')
29 def post(self, visualization_id):
30 vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)
31 require_admin_or_owner(vis.query_rel.user_id)
32
33 kwargs = request.get_json(force=True)
34 if 'options' in kwargs:
35 kwargs['options'] = json.dumps(kwargs['options'])
36
37 kwargs.pop('id', None)
38 kwargs.pop('query_id', None)
39
40 self.update_model(vis, kwargs)
41 d = vis.to_dict(with_query=False)
42 models.db.session.commit()
43 return d
44
45 @require_permission('edit_query')
46 def delete(self, visualization_id):
47 vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)
48 require_admin_or_owner(vis.query_rel.user_id)
49 models.db.session.delete(vis)
50 models.db.session.commit()
51
```
Path: `redash/permissions.py`
Content:
```
1 from flask_login import current_user
2 from flask_restful import abort
3 import functools
4 from funcy import flatten
5
6 view_only = True
7 not_view_only = False
8
9 ACCESS_TYPE_VIEW = 'view'
10 ACCESS_TYPE_MODIFY = 'modify'
11 ACCESS_TYPE_DELETE = 'delete'
12
13 ACCESS_TYPES = (ACCESS_TYPE_VIEW, ACCESS_TYPE_MODIFY, ACCESS_TYPE_DELETE)
14
15
16 def has_access(object_groups, user, need_view_only):
17 if 'admin' in user.permissions:
18 return True
19
20 matching_groups = set(object_groups.keys()).intersection(user.group_ids)
21
22 if not matching_groups:
23 return False
24
25 required_level = 1 if need_view_only else 2
26
27 group_level = 1 if all(flatten([object_groups[group] for group in matching_groups])) else 2
28
29 return required_level <= group_level
30
31
32 def require_access(object_groups, user, need_view_only):
33 if not has_access(object_groups, user, need_view_only):
34 abort(403)
35
36
37 class require_permissions(object):
38 def __init__(self, permissions):
39 self.permissions = permissions
40
41 def __call__(self, fn):
42 @functools.wraps(fn)
43 def decorated(*args, **kwargs):
44 has_permissions = current_user.has_permissions(self.permissions)
45
46 if has_permissions:
47 return fn(*args, **kwargs)
48 else:
49 abort(403)
50
51 return decorated
52
53
54 def require_permission(permission):
55 return require_permissions((permission,))
56
57
58 def require_admin(fn):
59 return require_permission('admin')(fn)
60
61
62 def require_super_admin(fn):
63 return require_permission('super_admin')(fn)
64
65
66 def has_permission_or_owner(permission, object_owner_id):
67 return int(object_owner_id) == current_user.id or current_user.has_permission(permission)
68
69
70 def is_admin_or_owner(object_owner_id):
71 return has_permission_or_owner('admin', object_owner_id)
72
73
74 def require_permission_or_owner(permission, object_owner_id):
75 if not has_permission_or_owner(permission, object_owner_id):
76 abort(403)
77
78
79 def require_admin_or_owner(object_owner_id):
80 if not is_admin_or_owner(object_owner_id):
81 abort(403, message="You don't have permission to edit this resource.")
82
83
84 def can_modify(obj, user):
85 return is_admin_or_owner(obj.user_id) or user.has_access(obj, ACCESS_TYPE_MODIFY)
86
87
88 def require_object_modify_permission(obj, user):
89 if not can_modify(obj, user):
90 abort(403)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/handlers/visualizations.py b/redash/handlers/visualizations.py
--- a/redash/handlers/visualizations.py
+++ b/redash/handlers/visualizations.py
@@ -1,9 +1,12 @@
import json
+
from flask import request
from redash import models
-from redash.permissions import require_permission, require_admin_or_owner
from redash.handlers.base import BaseResource, get_object_or_404
+from redash.permissions import (require_admin_or_owner,
+ require_object_modify_permission,
+ require_permission)
class VisualizationListResource(BaseResource):
@@ -12,7 +15,7 @@
kwargs = request.get_json(force=True)
query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop('query_id'), self.current_org)
- require_admin_or_owner(query.user_id)
+ require_object_modify_permission(query, self.current_user)
kwargs['options'] = json.dumps(kwargs['options'])
kwargs['query_rel'] = query
@@ -28,7 +31,7 @@
@require_permission('edit_query')
def post(self, visualization_id):
vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)
- require_admin_or_owner(vis.query_rel.user_id)
+ require_object_modify_permission(vis.query_rel, self.current_user)
kwargs = request.get_json(force=True)
if 'options' in kwargs:
@@ -45,6 +48,6 @@
@require_permission('edit_query')
def delete(self, visualization_id):
vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)
- require_admin_or_owner(vis.query_rel.user_id)
+ require_object_modify_permission(vis.query_rel, self.current_user)
models.db.session.delete(vis)
models.db.session.commit()
diff --git a/redash/permissions.py b/redash/permissions.py
--- a/redash/permissions.py
+++ b/redash/permissions.py
@@ -1,6 +1,7 @@
+import functools
+
from flask_login import current_user
from flask_restful import abort
-import functools
from funcy import flatten
view_only = True
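
For context on why the patch resolves the issue: `require_admin_or_owner` looks only at ownership and the admin permission, while `require_object_modify_permission` also honours per-object grants via `can_modify` (both defined in `redash/permissions.py` above). A minimal, self-contained sketch of that difference — the `_Obj` stand-ins and the `grants` set are illustrative only, not Redash's real models:

```python
# Simplified model of the two permission checks swapped by the patch.
ACCESS_TYPE_MODIFY = 'modify'

def old_check(user, query):
    # require_admin_or_owner: only an admin or the query's owner passes
    return user.is_admin or query.user_id == user.id

def new_check(user, query, grants):
    # require_object_modify_permission: users granted 'modify' access pass too,
    # which is what lets a non-owner change the data source, schedule the query,
    # or add and save visualizations once the owner has shared it
    return old_check(user, query) or (query.id, ACCESS_TYPE_MODIFY) in grants

class _Obj:
    """Minimal attribute holder standing in for Redash model objects."""
    def __init__(self, **kw):
        self.__dict__.update(kw)

grantee = _Obj(id=2, is_admin=False)      # user the owner shared the query with
query = _Obj(id=10, user_id=1)            # owned by user 1
grants = {(10, ACCESS_TYPE_MODIFY)}       # the grant recorded when sharing

assert not old_check(grantee, query)          # 403 before the patch
assert new_check(grantee, query, grants)      # allowed after the patch
```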
|
{"golden_diff": "diff --git a/redash/handlers/visualizations.py b/redash/handlers/visualizations.py\n--- a/redash/handlers/visualizations.py\n+++ b/redash/handlers/visualizations.py\n@@ -1,9 +1,12 @@\n import json\n+\n from flask import request\n \n from redash import models\n-from redash.permissions import require_permission, require_admin_or_owner\n from redash.handlers.base import BaseResource, get_object_or_404\n+from redash.permissions import (require_admin_or_owner,\n+ require_object_modify_permission,\n+ require_permission)\n \n \n class VisualizationListResource(BaseResource):\n@@ -12,7 +15,7 @@\n kwargs = request.get_json(force=True)\n \n query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop('query_id'), self.current_org)\n- require_admin_or_owner(query.user_id)\n+ require_object_modify_permission(query, self.current_user)\n \n kwargs['options'] = json.dumps(kwargs['options'])\n kwargs['query_rel'] = query\n@@ -28,7 +31,7 @@\n @require_permission('edit_query')\n def post(self, visualization_id):\n vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)\n- require_admin_or_owner(vis.query_rel.user_id)\n+ require_object_modify_permission(vis.query_rel, self.current_user)\n \n kwargs = request.get_json(force=True)\n if 'options' in kwargs:\n@@ -45,6 +48,6 @@\n @require_permission('edit_query')\n def delete(self, visualization_id):\n vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)\n- require_admin_or_owner(vis.query_rel.user_id)\n+ require_object_modify_permission(vis.query_rel, self.current_user)\n models.db.session.delete(vis)\n models.db.session.commit()\ndiff --git a/redash/permissions.py b/redash/permissions.py\n--- a/redash/permissions.py\n+++ b/redash/permissions.py\n@@ -1,6 +1,7 @@\n+import functools\n+\n from flask_login import current_user\n from flask_restful import abort\n-import functools\n from funcy import flatten\n \n view_only = True\n", "issue": "Redash Permissions not working for some use cases\n### Issue Summary\r\n\r\nCurrently, when query owner grants permission to another user for a query, the user is still unable to perform the following tasks:\r\n\r\n* change data source\r\n* schedule the query\r\n* add and save new visualisation\r\n\r\nI believe the user should have the ability to do all the things that the owner could do once permission has been granted.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.3\r\n* Browser/OS: Chrome\r\n* How did you install Redash: AWS using the AMI\r\n\n", "before_files": [{"content": "import json\nfrom flask import request\n\nfrom redash import models\nfrom redash.permissions import require_permission, require_admin_or_owner\nfrom redash.handlers.base import BaseResource, get_object_or_404\n\n\nclass VisualizationListResource(BaseResource):\n @require_permission('edit_query')\n def post(self):\n kwargs = request.get_json(force=True)\n\n query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop('query_id'), self.current_org)\n require_admin_or_owner(query.user_id)\n\n kwargs['options'] = json.dumps(kwargs['options'])\n kwargs['query_rel'] = query\n\n vis = models.Visualization(**kwargs)\n models.db.session.add(vis)\n models.db.session.commit()\n d = vis.to_dict(with_query=False)\n return d\n\n\nclass VisualizationResource(BaseResource):\n @require_permission('edit_query')\n def post(self, visualization_id):\n vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, 
self.current_org)\n require_admin_or_owner(vis.query_rel.user_id)\n\n kwargs = request.get_json(force=True)\n if 'options' in kwargs:\n kwargs['options'] = json.dumps(kwargs['options'])\n\n kwargs.pop('id', None)\n kwargs.pop('query_id', None)\n\n self.update_model(vis, kwargs)\n d = vis.to_dict(with_query=False)\n models.db.session.commit()\n return d\n\n @require_permission('edit_query')\n def delete(self, visualization_id):\n vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)\n require_admin_or_owner(vis.query_rel.user_id)\n models.db.session.delete(vis)\n models.db.session.commit()\n", "path": "redash/handlers/visualizations.py"}, {"content": "from flask_login import current_user\nfrom flask_restful import abort\nimport functools\nfrom funcy import flatten\n\nview_only = True\nnot_view_only = False\n\nACCESS_TYPE_VIEW = 'view'\nACCESS_TYPE_MODIFY = 'modify'\nACCESS_TYPE_DELETE = 'delete'\n\nACCESS_TYPES = (ACCESS_TYPE_VIEW, ACCESS_TYPE_MODIFY, ACCESS_TYPE_DELETE)\n\n\ndef has_access(object_groups, user, need_view_only):\n if 'admin' in user.permissions:\n return True\n\n matching_groups = set(object_groups.keys()).intersection(user.group_ids)\n\n if not matching_groups:\n return False\n\n required_level = 1 if need_view_only else 2\n\n group_level = 1 if all(flatten([object_groups[group] for group in matching_groups])) else 2\n\n return required_level <= group_level\n\n\ndef require_access(object_groups, user, need_view_only):\n if not has_access(object_groups, user, need_view_only):\n abort(403)\n\n\nclass require_permissions(object):\n def __init__(self, permissions):\n self.permissions = permissions\n\n def __call__(self, fn):\n @functools.wraps(fn)\n def decorated(*args, **kwargs):\n has_permissions = current_user.has_permissions(self.permissions)\n\n if has_permissions:\n return fn(*args, **kwargs)\n else:\n abort(403)\n\n return decorated\n\n\ndef require_permission(permission):\n return require_permissions((permission,))\n\n\ndef require_admin(fn):\n return require_permission('admin')(fn)\n\n\ndef require_super_admin(fn):\n return require_permission('super_admin')(fn)\n\n\ndef has_permission_or_owner(permission, object_owner_id):\n return int(object_owner_id) == current_user.id or current_user.has_permission(permission)\n\n\ndef is_admin_or_owner(object_owner_id):\n return has_permission_or_owner('admin', object_owner_id)\n\n\ndef require_permission_or_owner(permission, object_owner_id):\n if not has_permission_or_owner(permission, object_owner_id):\n abort(403)\n\n\ndef require_admin_or_owner(object_owner_id):\n if not is_admin_or_owner(object_owner_id):\n abort(403, message=\"You don't have permission to edit this resource.\")\n\n\ndef can_modify(obj, user):\n return is_admin_or_owner(obj.user_id) or user.has_access(obj, ACCESS_TYPE_MODIFY)\n\n\ndef require_object_modify_permission(obj, user):\n if not can_modify(obj, user):\n abort(403)\n", "path": "redash/permissions.py"}], "after_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.handlers.base import BaseResource, get_object_or_404\nfrom redash.permissions import (require_admin_or_owner,\n require_object_modify_permission,\n require_permission)\n\n\nclass VisualizationListResource(BaseResource):\n @require_permission('edit_query')\n def post(self):\n kwargs = request.get_json(force=True)\n\n query = get_object_or_404(models.Query.get_by_id_and_org, kwargs.pop('query_id'), self.current_org)\n 
require_object_modify_permission(query, self.current_user)\n\n kwargs['options'] = json.dumps(kwargs['options'])\n kwargs['query_rel'] = query\n\n vis = models.Visualization(**kwargs)\n models.db.session.add(vis)\n models.db.session.commit()\n d = vis.to_dict(with_query=False)\n return d\n\n\nclass VisualizationResource(BaseResource):\n @require_permission('edit_query')\n def post(self, visualization_id):\n vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)\n require_object_modify_permission(vis.query_rel, self.current_user)\n\n kwargs = request.get_json(force=True)\n if 'options' in kwargs:\n kwargs['options'] = json.dumps(kwargs['options'])\n\n kwargs.pop('id', None)\n kwargs.pop('query_id', None)\n\n self.update_model(vis, kwargs)\n d = vis.to_dict(with_query=False)\n models.db.session.commit()\n return d\n\n @require_permission('edit_query')\n def delete(self, visualization_id):\n vis = get_object_or_404(models.Visualization.get_by_id_and_org, visualization_id, self.current_org)\n require_object_modify_permission(vis.query_rel, self.current_user)\n models.db.session.delete(vis)\n models.db.session.commit()\n", "path": "redash/handlers/visualizations.py"}, {"content": "import functools\n\nfrom flask_login import current_user\nfrom flask_restful import abort\nfrom funcy import flatten\n\nview_only = True\nnot_view_only = False\n\nACCESS_TYPE_VIEW = 'view'\nACCESS_TYPE_MODIFY = 'modify'\nACCESS_TYPE_DELETE = 'delete'\n\nACCESS_TYPES = (ACCESS_TYPE_VIEW, ACCESS_TYPE_MODIFY, ACCESS_TYPE_DELETE)\n\n\ndef has_access(object_groups, user, need_view_only):\n if 'admin' in user.permissions:\n return True\n\n matching_groups = set(object_groups.keys()).intersection(user.group_ids)\n\n if not matching_groups:\n return False\n\n required_level = 1 if need_view_only else 2\n\n group_level = 1 if all(flatten([object_groups[group] for group in matching_groups])) else 2\n\n return required_level <= group_level\n\n\ndef require_access(object_groups, user, need_view_only):\n if not has_access(object_groups, user, need_view_only):\n abort(403)\n\n\nclass require_permissions(object):\n def __init__(self, permissions):\n self.permissions = permissions\n\n def __call__(self, fn):\n @functools.wraps(fn)\n def decorated(*args, **kwargs):\n has_permissions = current_user.has_permissions(self.permissions)\n\n if has_permissions:\n return fn(*args, **kwargs)\n else:\n abort(403)\n\n return decorated\n\n\ndef require_permission(permission):\n return require_permissions((permission,))\n\n\ndef require_admin(fn):\n return require_permission('admin')(fn)\n\n\ndef require_super_admin(fn):\n return require_permission('super_admin')(fn)\n\n\ndef has_permission_or_owner(permission, object_owner_id):\n return int(object_owner_id) == current_user.id or current_user.has_permission(permission)\n\n\ndef is_admin_or_owner(object_owner_id):\n return has_permission_or_owner('admin', object_owner_id)\n\n\ndef require_permission_or_owner(permission, object_owner_id):\n if not has_permission_or_owner(permission, object_owner_id):\n abort(403)\n\n\ndef require_admin_or_owner(object_owner_id):\n if not is_admin_or_owner(object_owner_id):\n abort(403, message=\"You don't have permission to edit this resource.\")\n\n\ndef can_modify(obj, user):\n return is_admin_or_owner(obj.user_id) or user.has_access(obj, ACCESS_TYPE_MODIFY)\n\n\ndef require_object_modify_permission(obj, user):\n if not can_modify(obj, user):\n abort(403)\n", "path": "redash/permissions.py"}]}
| 1,581 | 496 |
gh_patches_debug_16820
|
rasdani/github-patches
|
git_diff
|
PokemonGoF__PokemonGo-Bot-4568
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add option to separate usage of incubator unbreakable from breakable (2)
## Short Description:
- The 2, 5 and 10km eggs can be dispatched between either the infinite or the breakable incubators.
I wasn't able to test this option in real-game, since well... My bot is sleeping. From the tests I ran, the option should behave as expected.
## Fixes/Resolves/Closes (please use correct syntax):
- Closes #3503
- Closes #3552
Not sure why it failed before (cf #4556)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pokemongo_bot/cell_workers/incubate_eggs.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from pokemongo_bot.human_behaviour import sleep
4 from pokemongo_bot.base_task import BaseTask
5
6
7 class IncubateEggs(BaseTask):
8 SUPPORTED_TASK_API_VERSION = 1
9
10 last_km_walked = 0
11
12 def initialize(self):
13 self.next_update = None
14 self.ready_incubators = []
15 self.used_incubators = []
16 self.eggs = []
17 self.km_walked = 0
18 self.hatching_animation_delay = 4.20
19 self.max_iv = 45.0
20
21 self._process_config()
22
23 def _process_config(self):
24 self.longer_eggs_first = self.config.get("longer_eggs_first", True)
25 self.min_interval = self.config.get('min_interval', 120)
26
27 self.breakable_incubator = self.config.get("breakable", [])
28 self.infinite_incubator = self.config.get("infinite", [])
29
30 def work(self):
31 try:
32 self._check_inventory()
33 except:
34 return
35
36 if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:
37 self.used_incubators.sort(key=lambda x: x.get("km"))
38 km_left = self.used_incubators[0]['km']-self.km_walked
39 if km_left <= 0:
40 self._hatch_eggs()
41 else:
42 self.bot.metrics.next_hatching_km(km_left)
43
44 if self._should_print():
45 self._print_eggs()
46 self._compute_next_update()
47
48 IncubateEggs.last_km_walked = self.km_walked
49
50 sorting = self.longer_eggs_first
51 self.eggs.sort(key=lambda x: x.get("km"), reverse=sorting)
52
53 if self.ready_incubators:
54 self._apply_incubators()
55
56 def _apply_incubators(self):
57 for incubator in self.ready_incubators:
58 if incubator.get('used', False):
59 continue
60 for egg in self.eggs:
61 if egg["used"] or egg["km"] == -1:
62 continue
63
64 if self.breakable_incubator:
65 # test if the incubator is of type breakable
66 if incubator.get('uses_remaining') is not None:
67 if egg["km"] not in self.breakable_incubator:
68 continue
69
70 if self.infinite_incubator:
71 # test if the incubator is of type infinite
72 if incubator.get('uses_remaining') is None:
73 if egg["km"] not in self.infinite_incubator:
74 continue
75
76 self.emit_event(
77 'incubate_try',
78 level='debug',
79 formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}",
80 data={
81 'incubator_id': incubator['id'],
82 'egg_id': egg['id']
83 }
84 )
85 ret = self.bot.api.use_item_egg_incubator(
86 item_id=incubator["id"],
87 pokemon_id=egg["id"]
88 )
89 if ret:
90 code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0)
91 if code == 1:
92 self.emit_event(
93 'incubate',
94 formatted='Incubating a {distance_in_km} egg.',
95 data={
96 'distance_in_km': str(egg['km'])
97 }
98 )
99 egg["used"] = True
100 incubator["used"] = True
101 break
102 elif code == 5 or code == 7:
103 self.emit_event(
104 'incubator_already_used',
105 level='debug',
106 formatted='Incubator in use.',
107 )
108 incubator["used"] = True
109 break
110 elif code == 6:
111 self.emit_event(
112 'egg_already_incubating',
113 level='debug',
114 formatted='Egg already incubating',
115 )
116 egg["used"] = True
117
118 def _check_inventory(self, lookup_ids=[]):
119 inv = {}
120 response_dict = self.bot.api.get_inventory()
121 matched_pokemon = []
122 temp_eggs = []
123 temp_used_incubators = []
124 temp_ready_incubators = []
125 inv = reduce(
126 dict.__getitem__,
127 ["responses", "GET_INVENTORY", "inventory_delta", "inventory_items"],
128 response_dict
129 )
130 for inv_data in inv:
131 inv_data = inv_data.get("inventory_item_data", {})
132 if "egg_incubators" in inv_data:
133 temp_used_incubators = []
134 temp_ready_incubators = []
135 incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
136 if isinstance(incubators, basestring): # checking for old response
137 incubators = [incubators]
138 for incubator in incubators:
139 if 'pokemon_id' in incubator:
140 start_km = incubator.get('start_km_walked', 9001)
141 km_walked = incubator.get('target_km_walked', 9001)
142 temp_used_incubators.append({
143 "id": incubator.get('id', -1),
144 "km": km_walked,
145 "km_needed": (km_walked - start_km)
146 })
147 else:
148 temp_ready_incubators.append({
149 "id": incubator.get('id', -1)
150 })
151 continue
152 if "pokemon_data" in inv_data:
153 pokemon = inv_data.get("pokemon_data", {})
154 if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon:
155 temp_eggs.append({
156 "id": pokemon.get("id", -1),
157 "km": pokemon.get("egg_km_walked_target", -1),
158 "used": False
159 })
160 elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:
161 pokemon.update({
162 "iv": [
163 pokemon.get('individual_attack', 0),
164 pokemon.get('individual_defense', 0),
165 pokemon.get('individual_stamina', 0)
166 ]})
167 matched_pokemon.append(pokemon)
168 continue
169 if "player_stats" in inv_data:
170 self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
171 if temp_used_incubators:
172 self.used_incubators = temp_used_incubators
173 if temp_ready_incubators:
174 self.ready_incubators = temp_ready_incubators
175 if temp_eggs:
176 self.eggs = temp_eggs
177 return matched_pokemon
178
179 def _hatch_eggs(self):
180 response_dict = self.bot.api.get_hatched_eggs()
181 log_color = 'green'
182 try:
183 result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict)
184 except KeyError:
185 return
186 pokemon_ids = []
187 if 'pokemon_id' in result:
188 pokemon_ids = [id for id in result['pokemon_id']]
189 stardust = result.get('stardust_awarded', "error")
190 candy = result.get('candy_awarded', "error")
191 xp = result.get('experience_awarded', "error")
192 sleep(self.hatching_animation_delay)
193 self.bot.latest_inventory = None
194 try:
195 pokemon_data = self._check_inventory(pokemon_ids)
196 for pokemon in pokemon_data:
197 # pokemon ids seem to be offset by one
198 if pokemon['pokemon_id']!=-1:
199 pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']
200 else:
201 pokemon['name'] = "error"
202 except:
203 pokemon_data = [{"name":"error","cp":"error","iv":"error"}]
204 if not pokemon_ids or pokemon_data[0]['name'] == "error":
205 self.emit_event(
206 'egg_hatched',
207 data={
208 'pokemon': 'error',
209 'cp': 'error',
210 'iv': 'error',
211 'exp': 'error',
212 'stardust': 'error',
213 'candy': 'error',
214 }
215 )
216 return
217 for i in range(len(pokemon_data)):
218 msg = "Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies."
219 self.bot.metrics.hatched_eggs(1)
220 self.emit_event(
221 'egg_hatched',
222 formatted=msg,
223 data={
224 'pokemon': pokemon_data[i]['name'],
225 'cp': pokemon_data[i]['cp'],
226 'iv': "{} {}".format(
227 "/".join(map(str, pokemon_data[i]['iv'])),
228 round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)
229 ),
230 'exp': xp[i],
231 'stardust': stardust[i],
232 'candy': candy[i],
233 }
234 )
235
236 def _print_eggs(self):
237 if not self.used_incubators:
238 return
239
240 self.used_incubators.sort(key=lambda x: x.get("km"))
241
242 eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]
243
244 self.emit_event(
245 'next_egg_incubates',
246 formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',
247 data={
248 'eggs_left': len(self.eggs),
249 'eggs_inc': len(self.used_incubators),
250 'eggs': ', '.join(eggs)
251 }
252 )
253
254 def _should_print(self):
255 """
256 Returns a value indicating whether the eggs should be displayed.
257 :return: True if the stats should be displayed; otherwise, False.
258 :rtype: bool
259 """
260 return self.next_update is None or datetime.now() >= self.next_update
261
262 def _compute_next_update(self):
263 """
264 Computes the next update datetime based on the minimum update interval.
265 :return: Nothing.
266 :rtype: None
267 """
268 self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py
--- a/pokemongo_bot/cell_workers/incubate_eggs.py
+++ b/pokemongo_bot/cell_workers/incubate_eggs.py
@@ -24,8 +24,8 @@
self.longer_eggs_first = self.config.get("longer_eggs_first", True)
self.min_interval = self.config.get('min_interval', 120)
- self.breakable_incubator = self.config.get("breakable", [])
- self.infinite_incubator = self.config.get("infinite", [])
+ self.breakable_incubator = self.config.get("breakable", [2,5,10])
+ self.infinite_incubator = self.config.get("infinite", [2,5,10])
def work(self):
try:
@@ -265,4 +265,4 @@
:return: Nothing.
:rtype: None
"""
- self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
\ No newline at end of file
+ self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
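
A rough illustration of what the changed defaults mean for a task config that simply omits the `breakable`/`infinite` keys (the situation reported in #3503/#3552); the `config` dict and `egg_km` value below are hypothetical:

```python
# How _process_config() resolves the egg-distance lists before and after the patch.
config = {}  # user did not set "breakable" or "infinite" for the incubate_eggs task

old_breakable = config.get("breakable", [])          # [] is falsy, so the km filter
old_infinite = config.get("infinite", [])            # in _apply_incubators() is skipped

new_breakable = config.get("breakable", [2, 5, 10])  # every egg size listed explicitly
new_infinite = config.get("infinite", [2, 5, 10])    # for both incubator types

egg_km = 5
print(egg_km in new_breakable)  # True: a 5 km egg may use a breakable incubator
print(egg_km in new_infinite)   # True: or the infinite one
```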
|
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py\n--- a/pokemongo_bot/cell_workers/incubate_eggs.py\n+++ b/pokemongo_bot/cell_workers/incubate_eggs.py\n@@ -24,8 +24,8 @@\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n \n- self.breakable_incubator = self.config.get(\"breakable\", [])\n- self.infinite_incubator = self.config.get(\"infinite\", [])\n+ self.breakable_incubator = self.config.get(\"breakable\", [2,5,10])\n+ self.infinite_incubator = self.config.get(\"infinite\", [2,5,10])\n \n def work(self):\n try:\n@@ -265,4 +265,4 @@\n :return: Nothing.\n :rtype: None\n \"\"\"\n- self.next_update = datetime.now() + timedelta(seconds=self.min_interval)\n\\ No newline at end of file\n+ self.next_update = datetime.now() + timedelta(seconds=self.min_interval)\n", "issue": "Add option to separate usage of incubator unbreakable from breakable (2)\n## Short Description:\n- The 2, 5 and 10km eggs can be dispatched between either the infinite or the breakables incubators.\n\nI wasn't able to test this option in real-game, since well... My bot is sleeping. From the tests I ran, the option should behave as excepted.\n## Fixes/Resolves/Closes (please use correct syntax):\n- Closes #3503\n- Closes #3552 \n\nNot sure why it failed before (cf #4556)\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass IncubateEggs(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n last_km_walked = 0\n\n def initialize(self):\n self.next_update = None\n self.ready_incubators = []\n self.used_incubators = []\n self.eggs = []\n self.km_walked = 0\n self.hatching_animation_delay = 4.20\n self.max_iv = 45.0\n\n self._process_config()\n\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n \n self.breakable_incubator = self.config.get(\"breakable\", [])\n self.infinite_incubator = self.config.get(\"infinite\", [])\n \n def work(self):\n try:\n self._check_inventory()\n except:\n return\n\n if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n km_left = self.used_incubators[0]['km']-self.km_walked\n if km_left <= 0:\n self._hatch_eggs()\n else:\n self.bot.metrics.next_hatching_km(km_left)\n\n if self._should_print():\n self._print_eggs()\n self._compute_next_update()\n\n IncubateEggs.last_km_walked = self.km_walked\n\n sorting = self.longer_eggs_first\n self.eggs.sort(key=lambda x: x.get(\"km\"), reverse=sorting)\n\n if self.ready_incubators:\n self._apply_incubators()\n\n def _apply_incubators(self):\n for incubator in self.ready_incubators:\n if incubator.get('used', False):\n continue\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n \n if self.breakable_incubator:\n # test if the incubator is of type breakable\n if incubator.get('uses_remaining') is not None:\n if egg[\"km\"] not in self.breakable_incubator:\n continue\n \n if self.infinite_incubator:\n # test if the incubator is of type infinite\n if incubator.get('uses_remaining') is None:\n if egg[\"km\"] not in self.infinite_incubator:\n continue\n \n self.emit_event(\n 'incubate_try',\n level='debug',\n formatted=\"Attempting to apply incubator {incubator_id} to egg {egg_id}\",\n 
data={\n 'incubator_id': incubator['id'],\n 'egg_id': egg['id']\n }\n )\n ret = self.bot.api.use_item_egg_incubator(\n item_id=incubator[\"id\"],\n pokemon_id=egg[\"id\"]\n )\n if ret:\n code = ret.get(\"responses\", {}).get(\"USE_ITEM_EGG_INCUBATOR\", {}).get(\"result\", 0)\n if code == 1:\n self.emit_event(\n 'incubate',\n formatted='Incubating a {distance_in_km} egg.',\n data={\n 'distance_in_km': str(egg['km'])\n }\n )\n egg[\"used\"] = True\n incubator[\"used\"] = True\n break\n elif code == 5 or code == 7:\n self.emit_event(\n 'incubator_already_used',\n level='debug',\n formatted='Incubator in use.',\n )\n incubator[\"used\"] = True\n break\n elif code == 6:\n self.emit_event(\n 'egg_already_incubating',\n level='debug',\n formatted='Egg already incubating',\n )\n egg[\"used\"] = True\n\n def _check_inventory(self, lookup_ids=[]):\n inv = {}\n response_dict = self.bot.api.get_inventory()\n matched_pokemon = []\n temp_eggs = []\n temp_used_incubators = []\n temp_ready_incubators = []\n inv = reduce(\n dict.__getitem__,\n [\"responses\", \"GET_INVENTORY\", \"inventory_delta\", \"inventory_items\"],\n response_dict\n )\n for inv_data in inv:\n inv_data = inv_data.get(\"inventory_item_data\", {})\n if \"egg_incubators\" in inv_data:\n temp_used_incubators = []\n temp_ready_incubators = []\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n for incubator in incubators: \n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n temp_used_incubators.append({\n \"id\": incubator.get('id', -1),\n \"km\": km_walked,\n \"km_needed\": (km_walked - start_km)\n })\n else:\n temp_ready_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n continue\n if \"pokemon_data\" in inv_data:\n pokemon = inv_data.get(\"pokemon_data\", {})\n if pokemon.get(\"is_egg\", False) and \"egg_incubator_id\" not in pokemon:\n temp_eggs.append({\n \"id\": pokemon.get(\"id\", -1),\n \"km\": pokemon.get(\"egg_km_walked_target\", -1),\n \"used\": False\n })\n elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:\n pokemon.update({\n \"iv\": [\n pokemon.get('individual_attack', 0),\n pokemon.get('individual_defense', 0),\n pokemon.get('individual_stamina', 0)\n ]})\n matched_pokemon.append(pokemon)\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n if temp_used_incubators:\n self.used_incubators = temp_used_incubators\n if temp_ready_incubators:\n self.ready_incubators = temp_ready_incubators\n if temp_eggs:\n self.eggs = temp_eggs\n return matched_pokemon\n\n def _hatch_eggs(self):\n response_dict = self.bot.api.get_hatched_eggs()\n log_color = 'green'\n try:\n result = reduce(dict.__getitem__, [\"responses\", \"GET_HATCHED_EGGS\"], response_dict)\n except KeyError:\n return\n pokemon_ids = []\n if 'pokemon_id' in result:\n pokemon_ids = [id for id in result['pokemon_id']]\n stardust = result.get('stardust_awarded', \"error\")\n candy = result.get('candy_awarded', \"error\")\n xp = result.get('experience_awarded', \"error\")\n sleep(self.hatching_animation_delay)\n self.bot.latest_inventory = None\n try:\n pokemon_data = self._check_inventory(pokemon_ids)\n for pokemon in pokemon_data:\n # pokemon ids seem to be offset by one\n if pokemon['pokemon_id']!=-1:\n pokemon['name'] = 
self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']\n else:\n pokemon['name'] = \"error\"\n except:\n pokemon_data = [{\"name\":\"error\",\"cp\":\"error\",\"iv\":\"error\"}]\n if not pokemon_ids or pokemon_data[0]['name'] == \"error\":\n self.emit_event(\n 'egg_hatched',\n data={\n 'pokemon': 'error',\n 'cp': 'error',\n 'iv': 'error',\n 'exp': 'error',\n 'stardust': 'error',\n 'candy': 'error',\n }\n )\n return\n for i in range(len(pokemon_data)):\n msg = \"Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies.\"\n self.bot.metrics.hatched_eggs(1)\n self.emit_event(\n 'egg_hatched',\n formatted=msg,\n data={\n 'pokemon': pokemon_data[i]['name'],\n 'cp': pokemon_data[i]['cp'],\n 'iv': \"{} {}\".format(\n \"/\".join(map(str, pokemon_data[i]['iv'])),\n round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)\n ),\n 'exp': xp[i],\n 'stardust': stardust[i],\n 'candy': candy[i],\n }\n )\n\n def _print_eggs(self):\n if not self.used_incubators:\n return\n\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n \n eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]\n\n self.emit_event(\n 'next_egg_incubates',\n formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',\n data={\n 'eggs_left': len(self.eggs),\n 'eggs_inc': len(self.used_incubators),\n 'eggs': ', '.join(eggs)\n }\n )\n \n def _should_print(self):\n \"\"\"\n Returns a value indicating whether the eggs should be displayed.\n :return: True if the stats should be displayed; otherwise, False.\n :rtype: bool\n \"\"\"\n return self.next_update is None or datetime.now() >= self.next_update\n\n def _compute_next_update(self):\n \"\"\"\n Computes the next update datetime based on the minimum update interval.\n :return: Nothing.\n :rtype: None\n \"\"\"\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "path": "pokemongo_bot/cell_workers/incubate_eggs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass IncubateEggs(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n last_km_walked = 0\n\n def initialize(self):\n self.next_update = None\n self.ready_incubators = []\n self.used_incubators = []\n self.eggs = []\n self.km_walked = 0\n self.hatching_animation_delay = 4.20\n self.max_iv = 45.0\n\n self._process_config()\n\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n \n self.breakable_incubator = self.config.get(\"breakable\", [2,5,10])\n self.infinite_incubator = self.config.get(\"infinite\", [2,5,10])\n \n def work(self):\n try:\n self._check_inventory()\n except:\n return\n\n if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n km_left = self.used_incubators[0]['km']-self.km_walked\n if km_left <= 0:\n self._hatch_eggs()\n else:\n self.bot.metrics.next_hatching_km(km_left)\n\n if self._should_print():\n self._print_eggs()\n self._compute_next_update()\n\n IncubateEggs.last_km_walked = self.km_walked\n\n sorting = self.longer_eggs_first\n self.eggs.sort(key=lambda x: x.get(\"km\"), reverse=sorting)\n\n if self.ready_incubators:\n self._apply_incubators()\n\n def _apply_incubators(self):\n for incubator in self.ready_incubators:\n if incubator.get('used', 
False):\n continue\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n \n if self.breakable_incubator:\n # test if the incubator is of type breakable\n if incubator.get('uses_remaining') is not None:\n if egg[\"km\"] not in self.breakable_incubator:\n continue\n \n if self.infinite_incubator:\n # test if the incubator is of type infinite\n if incubator.get('uses_remaining') is None:\n if egg[\"km\"] not in self.infinite_incubator:\n continue\n \n self.emit_event(\n 'incubate_try',\n level='debug',\n formatted=\"Attempting to apply incubator {incubator_id} to egg {egg_id}\",\n data={\n 'incubator_id': incubator['id'],\n 'egg_id': egg['id']\n }\n )\n ret = self.bot.api.use_item_egg_incubator(\n item_id=incubator[\"id\"],\n pokemon_id=egg[\"id\"]\n )\n if ret:\n code = ret.get(\"responses\", {}).get(\"USE_ITEM_EGG_INCUBATOR\", {}).get(\"result\", 0)\n if code == 1:\n self.emit_event(\n 'incubate',\n formatted='Incubating a {distance_in_km} egg.',\n data={\n 'distance_in_km': str(egg['km'])\n }\n )\n egg[\"used\"] = True\n incubator[\"used\"] = True\n break\n elif code == 5 or code == 7:\n self.emit_event(\n 'incubator_already_used',\n level='debug',\n formatted='Incubator in use.',\n )\n incubator[\"used\"] = True\n break\n elif code == 6:\n self.emit_event(\n 'egg_already_incubating',\n level='debug',\n formatted='Egg already incubating',\n )\n egg[\"used\"] = True\n\n def _check_inventory(self, lookup_ids=[]):\n inv = {}\n response_dict = self.bot.api.get_inventory()\n matched_pokemon = []\n temp_eggs = []\n temp_used_incubators = []\n temp_ready_incubators = []\n inv = reduce(\n dict.__getitem__,\n [\"responses\", \"GET_INVENTORY\", \"inventory_delta\", \"inventory_items\"],\n response_dict\n )\n for inv_data in inv:\n inv_data = inv_data.get(\"inventory_item_data\", {})\n if \"egg_incubators\" in inv_data:\n temp_used_incubators = []\n temp_ready_incubators = []\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n for incubator in incubators: \n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n temp_used_incubators.append({\n \"id\": incubator.get('id', -1),\n \"km\": km_walked,\n \"km_needed\": (km_walked - start_km)\n })\n else:\n temp_ready_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n continue\n if \"pokemon_data\" in inv_data:\n pokemon = inv_data.get(\"pokemon_data\", {})\n if pokemon.get(\"is_egg\", False) and \"egg_incubator_id\" not in pokemon:\n temp_eggs.append({\n \"id\": pokemon.get(\"id\", -1),\n \"km\": pokemon.get(\"egg_km_walked_target\", -1),\n \"used\": False\n })\n elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:\n pokemon.update({\n \"iv\": [\n pokemon.get('individual_attack', 0),\n pokemon.get('individual_defense', 0),\n pokemon.get('individual_stamina', 0)\n ]})\n matched_pokemon.append(pokemon)\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n if temp_used_incubators:\n self.used_incubators = temp_used_incubators\n if temp_ready_incubators:\n self.ready_incubators = temp_ready_incubators\n if temp_eggs:\n self.eggs = temp_eggs\n return matched_pokemon\n\n def _hatch_eggs(self):\n response_dict = self.bot.api.get_hatched_eggs()\n log_color = 'green'\n try:\n result = reduce(dict.__getitem__, [\"responses\", \"GET_HATCHED_EGGS\"], 
response_dict)\n except KeyError:\n return\n pokemon_ids = []\n if 'pokemon_id' in result:\n pokemon_ids = [id for id in result['pokemon_id']]\n stardust = result.get('stardust_awarded', \"error\")\n candy = result.get('candy_awarded', \"error\")\n xp = result.get('experience_awarded', \"error\")\n sleep(self.hatching_animation_delay)\n self.bot.latest_inventory = None\n try:\n pokemon_data = self._check_inventory(pokemon_ids)\n for pokemon in pokemon_data:\n # pokemon ids seem to be offset by one\n if pokemon['pokemon_id']!=-1:\n pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']\n else:\n pokemon['name'] = \"error\"\n except:\n pokemon_data = [{\"name\":\"error\",\"cp\":\"error\",\"iv\":\"error\"}]\n if not pokemon_ids or pokemon_data[0]['name'] == \"error\":\n self.emit_event(\n 'egg_hatched',\n data={\n 'pokemon': 'error',\n 'cp': 'error',\n 'iv': 'error',\n 'exp': 'error',\n 'stardust': 'error',\n 'candy': 'error',\n }\n )\n return\n for i in range(len(pokemon_data)):\n msg = \"Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies.\"\n self.bot.metrics.hatched_eggs(1)\n self.emit_event(\n 'egg_hatched',\n formatted=msg,\n data={\n 'pokemon': pokemon_data[i]['name'],\n 'cp': pokemon_data[i]['cp'],\n 'iv': \"{} {}\".format(\n \"/\".join(map(str, pokemon_data[i]['iv'])),\n round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)\n ),\n 'exp': xp[i],\n 'stardust': stardust[i],\n 'candy': candy[i],\n }\n )\n\n def _print_eggs(self):\n if not self.used_incubators:\n return\n\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n \n eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]\n\n self.emit_event(\n 'next_egg_incubates',\n formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',\n data={\n 'eggs_left': len(self.eggs),\n 'eggs_inc': len(self.used_incubators),\n 'eggs': ', '.join(eggs)\n }\n )\n \n def _should_print(self):\n \"\"\"\n Returns a value indicating whether the eggs should be displayed.\n :return: True if the stats should be displayed; otherwise, False.\n :rtype: bool\n \"\"\"\n return self.next_update is None or datetime.now() >= self.next_update\n\n def _compute_next_update(self):\n \"\"\"\n Computes the next update datetime based on the minimum update interval.\n :return: Nothing.\n :rtype: None\n \"\"\"\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)\n", "path": "pokemongo_bot/cell_workers/incubate_eggs.py"}]}
| 3,349 | 277 |
gh_patches_debug_1800
|
rasdani/github-patches
|
git_diff
|
spack__spack-23014
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue with openjdk: "Error: NameError: name 'Prefix' is not defined"
### Steps to reproduce the issue
Latest spack develop on OS X gives:
```console
→ spack install -v hpcviewer
==> Warning: Missing a source id for [email protected]_9
[+] /usr (external openjdk-11.0.10_9-eoscwsxi5erdz5htmnixttlxjzhogare)
==> Installing hpcviewer-2021.03-a4q2ajczhy6u5dxcdszfx3lelqbf7tmd
==> No binary for hpcviewer-2021.03-a4q2ajczhy6u5dxcdszfx3lelqbf7tmd found: installing from source
==> Warning: Missing a source id for [email protected]_9
==> Warning: microarchitecture specific optimizations are not supported yet on mixed compiler toolchains [check [email protected] for further details]
==> Error: NameError: name 'Prefix' is not defined
```
### Information on your system
* Mac OS 10.15.5
* Spack cloned today
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/openjdk/package.py`
Content:
```
1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 import os
7 import platform
8 import re
9
10
11 # If you need to add a new version, please be aware that:
12 # - versions in the following dict are automatically added to the package
13 # - version tuple must be in the form (checksum, url)
14 # - checksum must be sha256
15 # - package key must be in the form '{os}-{arch}' where 'os' is in the
16 # format returned by platform.system() and 'arch' by platform.machine()
17
18 _versions = {
19 '11.0.9.1_1': {
20 'Linux-ppc64le': ('d94b6b46a14ab0974b1c1b89661741126d8cf8a0068b471b8f5fa286a71636b1', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9.1%2B1/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.9.1_1.tar.gz')},
21 '11.0.8_10': {
22 'Linux-x86_64': ('6e4cead158037cb7747ca47416474d4f408c9126be5b96f9befd532e0a762b47', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.8%2B10/OpenJDK11U-jdk_x64_linux_hotspot_11.0.8_10.tar.gz')},
23 '11.0.0-2020-01-01': {
24 'Linux-aarch64': ('05c7d9c90edacd853850fbb0f52f8aa482809d0452c599cb9fe0b28b3b4bf329', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk11u-2020-01-01-06-13/OpenJDK11U-jdk_aarch64_linux_hotspot_2020-01-01-06-13.tar.gz')},
25 '11.0.2': {
26 'Linux-x86_64': ('99be79935354f5c0df1ad293620ea36d13f48ec3ea870c838f20c504c9668b57', 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz')},
27 '11.0.1': {
28 'Linux-x86_64': ('7a6bb980b9c91c478421f865087ad2d69086a0583aeeb9e69204785e8e97dcfd', 'https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_linux-x64_bin.tar.gz')},
29 '1.8.0_265-b01': {
30 'Linux-x86_64': ('1285da6278f2d38a790a21148d7e683f20de0799c44b937043830ef6b57f58c4', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u265-b01/OpenJDK8U-jdk_x64_linux_hotspot_8u265b01.tar.gz')},
31 '1.8.0_191-b12': {
32 'Linux-aarch64': ('8eee0aede947b804f9a5f49c8a38b52aace8a30a9ebd9383b7d06042fb5a237c', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u191-b12/OpenJDK8U-jdk_aarch64_linux_hotspot_8u191b12.tar.gz')},
33 '1.8.0_222-b10': {
34 'Linux-x86_64': ('20cff719c6de43f8bb58c7f59e251da7c1fa2207897c9a4768c8c669716dc819', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u222-b10_openj9-0.15.1/OpenJDK8U-jdk_x64_linux_openj9_8u222b10_openj9-0.15.1.tar.gz')},
35 '1.8.0_202-b08': {
36 'Linux-x86_64': ('533dcd8d9ca15df231a1eb392fa713a66bca85a8e76d9b4ee30975f3823636b7', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u202-b08/OpenJDK8U-jdk_x64_linux_openj9_8u202b08_openj9-0.12.0.tar.gz')},
37 '1.8.0_40-b25': {
38 'Linux-x86_64': ('79e96dce03a14271040023231a7d0ae374b755d48adf68bbdaec30294e4e2b88', 'https://download.java.net/openjdk/jdk8u40/ri/jdk_ri-8u40-b25-linux-x64-10_feb_2015.tar.gz')},
39 }
40
41
42 class Openjdk(Package):
43 """The free and opensource java implementation"""
44
45 homepage = "https://jdk.java.net"
46
47 for ver, packages in _versions.items():
48 key = "{0}-{1}".format(platform.system(), platform.machine())
49 pkg = packages.get(key)
50 if pkg:
51 version(ver, sha256=pkg[0], url=pkg[1])
52
53 provides('java@11', when='@11.0:11.99')
54 provides('java@10', when='@10.0:10.99')
55 provides('java@9', when='@9.0:9.99')
56 provides('java@8', when='@1.8.0:1.8.999')
57
58 conflicts('target=ppc64:', msg='openjdk is not available for ppc64 (big endian)')
59
60 # FIXME:
61 # 1. `extends('java')` doesn't work, you need to use `extends('openjdk')`
62 # 2. Packages cannot extend multiple packages, see #987
63 # 3. Update `YamlFilesystemView.merge` to allow a Package to completely
64 # override how it is symlinked into a view prefix. Then, spack activate
65 # can symlink all *.jar files to `prefix.lib.ext`
66 extendable = True
67
68 executables = ['^java$']
69
70 @classmethod
71 def determine_version(cls, exe):
72 output = Executable(exe)('-version', output=str, error=str)
73
74 # Make sure this is actually OpenJDK, not Oracle JDK
75 if 'openjdk' not in output:
76 return None
77
78 match = re.search(r'\(build (\S+)\)', output)
79 return match.group(1).replace('+', '_') if match else None
80
81 @property
82 def home(self):
83 """Most of the time, ``JAVA_HOME`` is simply ``spec['java'].prefix``.
84 However, if the user is using an externally installed JDK, it may be
85 symlinked. For example, on macOS, the ``java`` executable can be found
86 in ``/usr/bin``, but ``JAVA_HOME`` is actually
87 ``/Library/Java/JavaVirtualMachines/jdk-10.0.1.jdk/Contents/Home``.
88 Users may not know the actual installation directory and add ``/usr``
89 to their ``packages.yaml`` unknowingly. Run ``java_home`` if it exists
90 to determine exactly where it is installed. Specify which version we
91 are expecting in case multiple Java versions are installed.
92 See ``man java_home`` for more details."""
93
94 prefix = self.prefix
95 java_home = prefix.libexec.java_home
96 if os.path.exists(java_home):
97 java_home = Executable(java_home)
98 version = str(self.version.up_to(2))
99 prefix = java_home('--version', version, output=str).strip()
100 prefix = Prefix(prefix)
101
102 return prefix
103
104 @property
105 def libs(self):
106 """Depending on the version number and whether the full JDK or just
107 the JRE was installed, Java libraries can be in several locations:
108
109 * ``lib/libjvm.so``
110 * ``jre/lib/libjvm.dylib``
111
112 Search recursively to find the correct library location."""
113
114 return find_libraries(['libjvm'], root=self.home, recursive=True)
115
116 def install(self, spec, prefix):
117 install_tree('.', prefix)
118
119 def setup_run_environment(self, env):
120 """Set JAVA_HOME."""
121
122 env.set('JAVA_HOME', self.home)
123
124 def setup_dependent_build_environment(self, env, dependent_spec):
125 """Set JAVA_HOME and CLASSPATH.
126
127 CLASSPATH contains the installation prefix for the extension and any
128 other Java extensions it depends on."""
129
130 env.set('JAVA_HOME', self.home)
131
132 class_paths = []
133 for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):
134 if d.package.extends(self.spec):
135 class_paths.extend(find(d.prefix, '*.jar'))
136
137 classpath = os.pathsep.join(class_paths)
138 env.set('CLASSPATH', classpath)
139
140 def setup_dependent_run_environment(self, env, dependent_spec):
141 """Set CLASSPATH.
142
143 CLASSPATH contains the installation prefix for the extension and any
144 other Java extensions it depends on."""
145 # For runtime environment set only the path for
146 # dependent_spec and prepend it to CLASSPATH
147 if dependent_spec.package.extends(self.spec):
148 class_paths = find(dependent_spec.prefix, '*.jar')
149 classpath = os.pathsep.join(class_paths)
150 env.prepend_path('CLASSPATH', classpath)
151
152 def setup_dependent_package(self, module, dependent_spec):
153 """Allows spec['java'].home to work."""
154
155 self.spec.home = self.home
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/openjdk/package.py b/var/spack/repos/builtin/packages/openjdk/package.py
--- a/var/spack/repos/builtin/packages/openjdk/package.py
+++ b/var/spack/repos/builtin/packages/openjdk/package.py
@@ -7,6 +7,7 @@
import platform
import re
+from spack.util.prefix import Prefix
# If you need to add a new version, please be aware that:
# - versions in the following dict are automatically added to the package
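
The `NameError` is raised from the package's `home` property, which calls `Prefix(prefix)` only when the external JDK's `libexec/java_home` exists — hence the macOS-specific report. A minimal sketch of what the added import provides; it assumes a Spack checkout is importable, and the `/usr` value is illustrative:

```python
# Prefix wraps a path string so that attribute access composes sub-directories,
# which is how expressions like `prefix.libexec.java_home` work in the package.
from spack.util.prefix import Prefix  # the import the golden diff adds

prefix = Prefix('/usr')
print(prefix.libexec.java_home)  # -> /usr/libexec/java_home
print(prefix.lib)                # -> /usr/lib
```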
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/openjdk/package.py b/var/spack/repos/builtin/packages/openjdk/package.py\n--- a/var/spack/repos/builtin/packages/openjdk/package.py\n+++ b/var/spack/repos/builtin/packages/openjdk/package.py\n@@ -7,6 +7,7 @@\n import platform\n import re\n \n+from spack.util.prefix import Prefix\n \n # If you need to add a new version, please be aware that:\n # - versions in the following dict are automatically added to the package\n", "issue": "Installation issue with openjdk: \"Error: NameError: name 'Prefix' is not defined\"\n### Steps to reproduce the issue\r\n\r\nLatest spack develop on OS X gives:\r\n\r\n```console\r\n \u2192 spack install -v hpcviewer\r\n==> Warning: Missing a source id for [email protected]_9\r\n[+] /usr (external openjdk-11.0.10_9-eoscwsxi5erdz5htmnixttlxjzhogare)\r\n==> Installing hpcviewer-2021.03-a4q2ajczhy6u5dxcdszfx3lelqbf7tmd\r\n==> No binary for hpcviewer-2021.03-a4q2ajczhy6u5dxcdszfx3lelqbf7tmd found: installing from source\r\n==> Warning: Missing a source id for [email protected]_9\r\n==> Warning: microarchitecture specific optimizations are not supported yet on mixed compiler toolchains [check [email protected] for further details]\r\n==> Error: NameError: name 'Prefix' is not defined\r\n```\r\n\r\n### Information on your system\r\n\r\n* Mac OS 10.15.5\r\n* Spack cloned today\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport os\nimport platform\nimport re\n\n\n# If you need to add a new version, please be aware that:\n# - versions in the following dict are automatically added to the package\n# - version tuple must be in the form (checksum, url)\n# - checksum must be sha256\n# - package key must be in the form '{os}-{arch}' where 'os' is in the\n# format returned by platform.system() and 'arch' by platform.machine()\n\n_versions = {\n '11.0.9.1_1': {\n 'Linux-ppc64le': ('d94b6b46a14ab0974b1c1b89661741126d8cf8a0068b471b8f5fa286a71636b1', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9.1%2B1/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.9.1_1.tar.gz')},\n '11.0.8_10': {\n 'Linux-x86_64': ('6e4cead158037cb7747ca47416474d4f408c9126be5b96f9befd532e0a762b47', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.8%2B10/OpenJDK11U-jdk_x64_linux_hotspot_11.0.8_10.tar.gz')},\n '11.0.0-2020-01-01': {\n 'Linux-aarch64': ('05c7d9c90edacd853850fbb0f52f8aa482809d0452c599cb9fe0b28b3b4bf329', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk11u-2020-01-01-06-13/OpenJDK11U-jdk_aarch64_linux_hotspot_2020-01-01-06-13.tar.gz')},\n '11.0.2': {\n 'Linux-x86_64': ('99be79935354f5c0df1ad293620ea36d13f48ec3ea870c838f20c504c9668b57', 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz')},\n '11.0.1': {\n 'Linux-x86_64': ('7a6bb980b9c91c478421f865087ad2d69086a0583aeeb9e69204785e8e97dcfd', 'https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_linux-x64_bin.tar.gz')},\n '1.8.0_265-b01': {\n 'Linux-x86_64': ('1285da6278f2d38a790a21148d7e683f20de0799c44b937043830ef6b57f58c4', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u265-b01/OpenJDK8U-jdk_x64_linux_hotspot_8u265b01.tar.gz')},\n '1.8.0_191-b12': {\n 'Linux-aarch64': ('8eee0aede947b804f9a5f49c8a38b52aace8a30a9ebd9383b7d06042fb5a237c', 
'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u191-b12/OpenJDK8U-jdk_aarch64_linux_hotspot_8u191b12.tar.gz')},\n '1.8.0_222-b10': {\n 'Linux-x86_64': ('20cff719c6de43f8bb58c7f59e251da7c1fa2207897c9a4768c8c669716dc819', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u222-b10_openj9-0.15.1/OpenJDK8U-jdk_x64_linux_openj9_8u222b10_openj9-0.15.1.tar.gz')},\n '1.8.0_202-b08': {\n 'Linux-x86_64': ('533dcd8d9ca15df231a1eb392fa713a66bca85a8e76d9b4ee30975f3823636b7', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u202-b08/OpenJDK8U-jdk_x64_linux_openj9_8u202b08_openj9-0.12.0.tar.gz')},\n '1.8.0_40-b25': {\n 'Linux-x86_64': ('79e96dce03a14271040023231a7d0ae374b755d48adf68bbdaec30294e4e2b88', 'https://download.java.net/openjdk/jdk8u40/ri/jdk_ri-8u40-b25-linux-x64-10_feb_2015.tar.gz')},\n}\n\n\nclass Openjdk(Package):\n \"\"\"The free and opensource java implementation\"\"\"\n\n homepage = \"https://jdk.java.net\"\n\n for ver, packages in _versions.items():\n key = \"{0}-{1}\".format(platform.system(), platform.machine())\n pkg = packages.get(key)\n if pkg:\n version(ver, sha256=pkg[0], url=pkg[1])\n\n provides('java@11', when='@11.0:11.99')\n provides('java@10', when='@10.0:10.99')\n provides('java@9', when='@9.0:9.99')\n provides('java@8', when='@1.8.0:1.8.999')\n\n conflicts('target=ppc64:', msg='openjdk is not available for ppc64 (big endian)')\n\n # FIXME:\n # 1. `extends('java')` doesn't work, you need to use `extends('openjdk')`\n # 2. Packages cannot extend multiple packages, see #987\n # 3. Update `YamlFilesystemView.merge` to allow a Package to completely\n # override how it is symlinked into a view prefix. Then, spack activate\n # can symlink all *.jar files to `prefix.lib.ext`\n extendable = True\n\n executables = ['^java$']\n\n @classmethod\n def determine_version(cls, exe):\n output = Executable(exe)('-version', output=str, error=str)\n\n # Make sure this is actually OpenJDK, not Oracle JDK\n if 'openjdk' not in output:\n return None\n\n match = re.search(r'\\(build (\\S+)\\)', output)\n return match.group(1).replace('+', '_') if match else None\n\n @property\n def home(self):\n \"\"\"Most of the time, ``JAVA_HOME`` is simply ``spec['java'].prefix``.\n However, if the user is using an externally installed JDK, it may be\n symlinked. For example, on macOS, the ``java`` executable can be found\n in ``/usr/bin``, but ``JAVA_HOME`` is actually\n ``/Library/Java/JavaVirtualMachines/jdk-10.0.1.jdk/Contents/Home``.\n Users may not know the actual installation directory and add ``/usr``\n to their ``packages.yaml`` unknowingly. Run ``java_home`` if it exists\n to determine exactly where it is installed. 
Specify which version we\n are expecting in case multiple Java versions are installed.\n See ``man java_home`` for more details.\"\"\"\n\n prefix = self.prefix\n java_home = prefix.libexec.java_home\n if os.path.exists(java_home):\n java_home = Executable(java_home)\n version = str(self.version.up_to(2))\n prefix = java_home('--version', version, output=str).strip()\n prefix = Prefix(prefix)\n\n return prefix\n\n @property\n def libs(self):\n \"\"\"Depending on the version number and whether the full JDK or just\n the JRE was installed, Java libraries can be in several locations:\n\n * ``lib/libjvm.so``\n * ``jre/lib/libjvm.dylib``\n\n Search recursively to find the correct library location.\"\"\"\n\n return find_libraries(['libjvm'], root=self.home, recursive=True)\n\n def install(self, spec, prefix):\n install_tree('.', prefix)\n\n def setup_run_environment(self, env):\n \"\"\"Set JAVA_HOME.\"\"\"\n\n env.set('JAVA_HOME', self.home)\n\n def setup_dependent_build_environment(self, env, dependent_spec):\n \"\"\"Set JAVA_HOME and CLASSPATH.\n\n CLASSPATH contains the installation prefix for the extension and any\n other Java extensions it depends on.\"\"\"\n\n env.set('JAVA_HOME', self.home)\n\n class_paths = []\n for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):\n if d.package.extends(self.spec):\n class_paths.extend(find(d.prefix, '*.jar'))\n\n classpath = os.pathsep.join(class_paths)\n env.set('CLASSPATH', classpath)\n\n def setup_dependent_run_environment(self, env, dependent_spec):\n \"\"\"Set CLASSPATH.\n\n CLASSPATH contains the installation prefix for the extension and any\n other Java extensions it depends on.\"\"\"\n # For runtime environment set only the path for\n # dependent_spec and prepend it to CLASSPATH\n if dependent_spec.package.extends(self.spec):\n class_paths = find(dependent_spec.prefix, '*.jar')\n classpath = os.pathsep.join(class_paths)\n env.prepend_path('CLASSPATH', classpath)\n\n def setup_dependent_package(self, module, dependent_spec):\n \"\"\"Allows spec['java'].home to work.\"\"\"\n\n self.spec.home = self.home\n", "path": "var/spack/repos/builtin/packages/openjdk/package.py"}], "after_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport os\nimport platform\nimport re\n\nfrom spack.util.prefix import Prefix\n\n# If you need to add a new version, please be aware that:\n# - versions in the following dict are automatically added to the package\n# - version tuple must be in the form (checksum, url)\n# - checksum must be sha256\n# - package key must be in the form '{os}-{arch}' where 'os' is in the\n# format returned by platform.system() and 'arch' by platform.machine()\n\n_versions = {\n '11.0.9.1_1': {\n 'Linux-ppc64le': ('d94b6b46a14ab0974b1c1b89661741126d8cf8a0068b471b8f5fa286a71636b1', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9.1%2B1/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.9.1_1.tar.gz')},\n '11.0.8_10': {\n 'Linux-x86_64': ('6e4cead158037cb7747ca47416474d4f408c9126be5b96f9befd532e0a762b47', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.8%2B10/OpenJDK11U-jdk_x64_linux_hotspot_11.0.8_10.tar.gz')},\n '11.0.0-2020-01-01': {\n 'Linux-aarch64': ('05c7d9c90edacd853850fbb0f52f8aa482809d0452c599cb9fe0b28b3b4bf329', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk11u-2020-01-01-06-13/OpenJDK11U-jdk_aarch64_linux_hotspot_2020-01-01-06-13.tar.gz')},\n '11.0.2': {\n 'Linux-x86_64': ('99be79935354f5c0df1ad293620ea36d13f48ec3ea870c838f20c504c9668b57', 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz')},\n '11.0.1': {\n 'Linux-x86_64': ('7a6bb980b9c91c478421f865087ad2d69086a0583aeeb9e69204785e8e97dcfd', 'https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_linux-x64_bin.tar.gz')},\n '1.8.0_265-b01': {\n 'Linux-x86_64': ('1285da6278f2d38a790a21148d7e683f20de0799c44b937043830ef6b57f58c4', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u265-b01/OpenJDK8U-jdk_x64_linux_hotspot_8u265b01.tar.gz')},\n '1.8.0_191-b12': {\n 'Linux-aarch64': ('8eee0aede947b804f9a5f49c8a38b52aace8a30a9ebd9383b7d06042fb5a237c', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u191-b12/OpenJDK8U-jdk_aarch64_linux_hotspot_8u191b12.tar.gz')},\n '1.8.0_222-b10': {\n 'Linux-x86_64': ('20cff719c6de43f8bb58c7f59e251da7c1fa2207897c9a4768c8c669716dc819', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u222-b10_openj9-0.15.1/OpenJDK8U-jdk_x64_linux_openj9_8u222b10_openj9-0.15.1.tar.gz')},\n '1.8.0_202-b08': {\n 'Linux-x86_64': ('533dcd8d9ca15df231a1eb392fa713a66bca85a8e76d9b4ee30975f3823636b7', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u202-b08/OpenJDK8U-jdk_x64_linux_openj9_8u202b08_openj9-0.12.0.tar.gz')},\n '1.8.0_40-b25': {\n 'Linux-x86_64': ('79e96dce03a14271040023231a7d0ae374b755d48adf68bbdaec30294e4e2b88', 'https://download.java.net/openjdk/jdk8u40/ri/jdk_ri-8u40-b25-linux-x64-10_feb_2015.tar.gz')},\n}\n\n\nclass Openjdk(Package):\n \"\"\"The free and opensource java implementation\"\"\"\n\n homepage = \"https://jdk.java.net\"\n\n for ver, packages in _versions.items():\n key = \"{0}-{1}\".format(platform.system(), platform.machine())\n pkg = packages.get(key)\n if pkg:\n version(ver, sha256=pkg[0], url=pkg[1])\n\n provides('java@11', when='@11.0:11.99')\n provides('java@10', when='@10.0:10.99')\n provides('java@9', when='@9.0:9.99')\n provides('java@8', when='@1.8.0:1.8.999')\n\n conflicts('target=ppc64:', msg='openjdk is not available for ppc64 (big endian)')\n\n # FIXME:\n # 1. 
`extends('java')` doesn't work, you need to use `extends('openjdk')`\n # 2. Packages cannot extend multiple packages, see #987\n # 3. Update `YamlFilesystemView.merge` to allow a Package to completely\n # override how it is symlinked into a view prefix. Then, spack activate\n # can symlink all *.jar files to `prefix.lib.ext`\n extendable = True\n\n executables = ['^java$']\n\n @classmethod\n def determine_version(cls, exe):\n output = Executable(exe)('-version', output=str, error=str)\n\n # Make sure this is actually OpenJDK, not Oracle JDK\n if 'openjdk' not in output:\n return None\n\n match = re.search(r'\\(build (\\S+)\\)', output)\n return match.group(1).replace('+', '_') if match else None\n\n @property\n def home(self):\n \"\"\"Most of the time, ``JAVA_HOME`` is simply ``spec['java'].prefix``.\n However, if the user is using an externally installed JDK, it may be\n symlinked. For example, on macOS, the ``java`` executable can be found\n in ``/usr/bin``, but ``JAVA_HOME`` is actually\n ``/Library/Java/JavaVirtualMachines/jdk-10.0.1.jdk/Contents/Home``.\n Users may not know the actual installation directory and add ``/usr``\n to their ``packages.yaml`` unknowingly. Run ``java_home`` if it exists\n to determine exactly where it is installed. Specify which version we\n are expecting in case multiple Java versions are installed.\n See ``man java_home`` for more details.\"\"\"\n\n prefix = self.prefix\n java_home = prefix.libexec.java_home\n if os.path.exists(java_home):\n java_home = Executable(java_home)\n version = str(self.version.up_to(2))\n prefix = java_home('--version', version, output=str).strip()\n prefix = Prefix(prefix)\n\n return prefix\n\n @property\n def libs(self):\n \"\"\"Depending on the version number and whether the full JDK or just\n the JRE was installed, Java libraries can be in several locations:\n\n * ``lib/libjvm.so``\n * ``jre/lib/libjvm.dylib``\n\n Search recursively to find the correct library location.\"\"\"\n\n return find_libraries(['libjvm'], root=self.home, recursive=True)\n\n def install(self, spec, prefix):\n install_tree('.', prefix)\n\n def setup_run_environment(self, env):\n \"\"\"Set JAVA_HOME.\"\"\"\n\n env.set('JAVA_HOME', self.home)\n\n def setup_dependent_build_environment(self, env, dependent_spec):\n \"\"\"Set JAVA_HOME and CLASSPATH.\n\n CLASSPATH contains the installation prefix for the extension and any\n other Java extensions it depends on.\"\"\"\n\n env.set('JAVA_HOME', self.home)\n\n class_paths = []\n for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):\n if d.package.extends(self.spec):\n class_paths.extend(find(d.prefix, '*.jar'))\n\n classpath = os.pathsep.join(class_paths)\n env.set('CLASSPATH', classpath)\n\n def setup_dependent_run_environment(self, env, dependent_spec):\n \"\"\"Set CLASSPATH.\n\n CLASSPATH contains the installation prefix for the extension and any\n other Java extensions it depends on.\"\"\"\n # For runtime environment set only the path for\n # dependent_spec and prepend it to CLASSPATH\n if dependent_spec.package.extends(self.spec):\n class_paths = find(dependent_spec.prefix, '*.jar')\n classpath = os.pathsep.join(class_paths)\n env.prepend_path('CLASSPATH', classpath)\n\n def setup_dependent_package(self, module, dependent_spec):\n \"\"\"Allows spec['java'].home to work.\"\"\"\n\n self.spec.home = self.home\n", "path": "var/spack/repos/builtin/packages/openjdk/package.py"}]}
| 3,542 | 111 |
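The spack/openjdk record above patches a `NameError: name 'Prefix' is not defined` by importing `Prefix` from `spack.util.prefix`. As a minimal sketch (plain Python, not Spack code), this is why the missing import only shows up at install time rather than when the package module is first imported:

```python
# Minimal sketch, not Spack code: names used inside a function body are
# resolved only when the function runs, so defining home() succeeds even
# though Prefix was never imported in this module.

def home():
    return Prefix("/usr")  # NameError is raised here, at call time

try:
    home()
except NameError as exc:
    print(exc)  # name 'Prefix' is not defined
```

That matches the report in the record: the error appears only once the branch that actually constructs a `Prefix` (the external, symlinked JDK case) gets executed.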
gh_patches_debug_3019
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-4790
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix setup_webui script
Motivation
----------
The script has a wrong import and needs to be fixed.
--- END ISSUE ---
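The "wrong import" turns out to be a mistyped entry in the `packages` list of `setup_webui.py` (see the patch further down in this record). Below is a hedged sketch of a sanity check that would catch it; it assumes the source layout implied by `package_dir={'': 'lib'}`, i.e. that package `a.b.c` lives at `lib/a/b/c/__init__.py`.

```python
# Hedged sketch: confirm that every entry in `packages` maps to a real
# package directory under lib/ before building the sdist.
import os

packages = ['rucio', 'rucio.web', 'rucio.web.ui',
            'rucio.web.ui.flask', 'rucio.web.ui.flask.common']


def missing_packages(packages, root='lib'):
    """Return the entries whose __init__.py cannot be found on disk."""
    missing = []
    for pkg in packages:
        parts = pkg.split('.') + ['__init__.py']
        if not os.path.exists(os.path.join(root, *parts)):
            missing.append(pkg)
    return missing


if __name__ == '__main__':
    # Empty when run from the repository root; the old, mistyped entry
    # 'rucio.web.flask.common' would have been reported here.
    print(missing_packages(packages))
```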
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup_webui.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2015-2021 CERN
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 # Authors:
17 # - Vincent Garonne <[email protected]>, 2015-2017
18 # - Martin Barisits <[email protected]>, 2016-2021
19 # - Benedikt Ziemons <[email protected]>, 2021
20
21 import os
22 import sys
23
24 from setuptools import setup
25
26
27 if sys.version_info < (3, 6):
28 print('ERROR: Rucio WebUI requires at least Python 3.6 to run.')
29 sys.exit(1)
30
31 try:
32 from setuputil import get_rucio_version
33 except ImportError:
34 sys.path.append(os.path.abspath(os.path.dirname(__file__)))
35 from setuputil import get_rucio_version
36
37 name = 'rucio-webui'
38 packages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.flask.common']
39 data_files = []
40 description = "Rucio WebUI Package"
41
42 setup(
43 name=name,
44 version=get_rucio_version(),
45 packages=packages,
46 package_dir={'': 'lib'},
47 data_files=None,
48 include_package_data=True,
49 scripts=None,
50 author="Rucio",
51 author_email="[email protected]",
52 description=description,
53 license="Apache License, Version 2.0",
54 url="https://rucio.cern.ch/",
55 python_requires=">=3.6, <4",
56 classifiers=[
57 'Development Status :: 5 - Production/Stable',
58 'License :: OSI Approved :: Apache Software License',
59 'Intended Audience :: Information Technology',
60 'Intended Audience :: System Administrators',
61 'Operating System :: POSIX :: Linux',
62 'Natural Language :: English',
63 'Programming Language :: Python',
64 'Programming Language :: Python :: 3',
65 'Programming Language :: Python :: 3.6',
66 'Programming Language :: Python :: 3.7',
67 'Programming Language :: Python :: 3.8',
68 'Programming Language :: Python :: 3.9',
69 'Environment :: No Input/Output (Daemon)', ],
70 install_requires=['rucio>=1.2.5', ],
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup_webui.py b/setup_webui.py
--- a/setup_webui.py
+++ b/setup_webui.py
@@ -35,7 +35,7 @@
from setuputil import get_rucio_version
name = 'rucio-webui'
-packages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.flask.common']
+packages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.ui.flask.common']
data_files = []
description = "Rucio WebUI Package"
|
{"golden_diff": "diff --git a/setup_webui.py b/setup_webui.py\n--- a/setup_webui.py\n+++ b/setup_webui.py\n@@ -35,7 +35,7 @@\n from setuputil import get_rucio_version\n \n name = 'rucio-webui'\n-packages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.flask.common']\n+packages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.ui.flask.common']\n data_files = []\n description = \"Rucio WebUI Package\"\n", "issue": "Fix setup_webui script\nMotivation\r\n----------\r\nScript has a wrong import, needs to be fixed.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015-2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2015-2017\n# - Martin Barisits <[email protected]>, 2016-2021\n# - Benedikt Ziemons <[email protected]>, 2021\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nif sys.version_info < (3, 6):\n print('ERROR: Rucio WebUI requires at least Python 3.6 to run.')\n sys.exit(1)\n\ntry:\n from setuputil import get_rucio_version\nexcept ImportError:\n sys.path.append(os.path.abspath(os.path.dirname(__file__)))\n from setuputil import get_rucio_version\n\nname = 'rucio-webui'\npackages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.flask.common']\ndata_files = []\ndescription = \"Rucio WebUI Package\"\n\nsetup(\n name=name,\n version=get_rucio_version(),\n packages=packages,\n package_dir={'': 'lib'},\n data_files=None,\n include_package_data=True,\n scripts=None,\n author=\"Rucio\",\n author_email=\"[email protected]\",\n description=description,\n license=\"Apache License, Version 2.0\",\n url=\"https://rucio.cern.ch/\",\n python_requires=\">=3.6, <4\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'Operating System :: POSIX :: Linux',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Environment :: No Input/Output (Daemon)', ],\n install_requires=['rucio>=1.2.5', ],\n)\n", "path": "setup_webui.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015-2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2015-2017\n# - Martin Barisits <[email protected]>, 2016-2021\n# - Benedikt Ziemons <[email protected]>, 2021\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nif sys.version_info < (3, 6):\n print('ERROR: Rucio WebUI requires at least Python 3.6 to run.')\n sys.exit(1)\n\ntry:\n from setuputil import get_rucio_version\nexcept ImportError:\n sys.path.append(os.path.abspath(os.path.dirname(__file__)))\n from setuputil import get_rucio_version\n\nname = 'rucio-webui'\npackages = ['rucio', 'rucio.web', 'rucio.web.ui', 'rucio.web.ui.flask', 'rucio.web.ui.flask.common']\ndata_files = []\ndescription = \"Rucio WebUI Package\"\n\nsetup(\n name=name,\n version=get_rucio_version(),\n packages=packages,\n package_dir={'': 'lib'},\n data_files=None,\n include_package_data=True,\n scripts=None,\n author=\"Rucio\",\n author_email=\"[email protected]\",\n description=description,\n license=\"Apache License, Version 2.0\",\n url=\"https://rucio.cern.ch/\",\n python_requires=\">=3.6, <4\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'Operating System :: POSIX :: Linux',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Environment :: No Input/Output (Daemon)', ],\n install_requires=['rucio>=1.2.5', ],\n)\n", "path": "setup_webui.py"}]}
| 1,045 | 141 |
gh_patches_debug_26431
|
rasdani/github-patches
|
git_diff
|
tensorflow__tfx-91
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import errors when trying to run Chicago Taxi on Dataflow
As in issue [#47](https://github.com/tensorflow/tfx/issues/47), I still have a problem with running CTE on Dataflow. When I use the code with no modifications, the error from the previous issue persists - it seems that somehow the `try-except` around the imports doesn't do its job.
When I changed the code to include only the relative import in my fork [here](https://github.com/mwalenia/tfx/tree/import-fix), the problem disappeared, but another one manifested.
This time, there's a problem with importing `estimator` from tensorflow somewhere in the dependencies. Stacktrace:
```Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 773, in run
self._load_main_session(self.local_staging_directory)
File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 489, in _load_main_session
pickler.load_session(session_file)
File "/usr/local/lib/python2.7/dist-packages/apache_beam/internal/pickler.py", line 269, in load_session
return dill.load_session(file_path)
File "/usr/local/lib/python2.7/dist-packages/dill/_dill.py", line 410, in load_session
module = unpickler.load()
File "/usr/lib/python2.7/pickle.py", line 864, in load
dispatch[key](self)
File "/usr/lib/python2.7/pickle.py", line 1139, in load_reduce
value = func(*args)
File "/usr/local/lib/python2.7/dist-packages/dill/_dill.py", line 828, in _import_module
return getattr(__import__(module, None, None, [obj]), obj)
File "/usr/local/lib/python2.7/dist-packages/trainer/taxi.py", line 19, in <module>
from tensorflow_transform import coders as tft_coders
File "/usr/local/lib/python2.7/dist-packages/tensorflow_transform/__init__.py", line 19, in <module>
from tensorflow_transform.analyzers import *
File "/usr/local/lib/python2.7/dist-packages/tensorflow_transform/analyzers.py", line 39, in <module>
from tensorflow_transform import tf_utils
File "/usr/local/lib/python2.7/dist-packages/tensorflow_transform/tf_utils.py", line 24, in <module>
from tensorflow.contrib.proto.python.ops import encode_proto_op
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/__init__.py", line 48, in <module>
from tensorflow.contrib import distribute
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/distribute/__init__.py", line 34, in <module>
from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/distribute/python/tpu_strategy.py", line 27, in <module>
from tensorflow.contrib.tpu.python.ops import tpu_ops
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/__init__.py", line 73, in <module>
from tensorflow.contrib.tpu.python.tpu.keras_support import tpu_model as keras_to_tpu_model
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/keras_support.py", line 71, in <module>
from tensorflow.python.estimator import model_fn as model_fn_lib
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/__init__.py", line 25, in <module>
import tensorflow.python.estimator.estimator_lib
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator_lib.py", line 22, in <module>
from tensorflow.python.estimator.canned.baseline import BaselineClassifier
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/canned/baseline.py", line 50, in <module>
from tensorflow.python.estimator import estimator
ImportError: cannot import name estimator
```
Is there anything I can do to fix this?
--- END ISSUE ---
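The `ImportError: cannot import name estimator` at the bottom of the traceback usually points to mismatched TensorFlow-ecosystem pins on the Dataflow workers, and the patch later in this record indeed moves the example to the TF 1.13.1 / Beam 2.12.0 line. Below is a hedged diagnostic sketch; the package names are the ones pinned in `setup.py`, and the versions it reports will of course depend on the worker image.

```python
# Hedged sketch: report the versions that are actually importable in the
# environment, so mismatched pins (e.g. tensorflow vs. the TFX libraries)
# are easy to spot in the worker logs.
from __future__ import print_function

import pkg_resources

PACKAGES = [
    'tensorflow',
    'tensorflow-transform',
    'tensorflow-data-validation',
    'tensorflow-model-analysis',
    'tensorflow-metadata',
    'tensorflow-serving-api',
    'apache-beam',
]


def report_versions(packages=PACKAGES):
    for name in packages:
        try:
            version = pkg_resources.get_distribution(name).version
        except pkg_resources.DistributionNotFound:
            version = 'not installed'
        print('%s==%s' % (name, version))


if __name__ == '__main__':
    report_versions()
```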
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tfx/examples/chicago_taxi/setup.py`
Content:
```
1 # Copyright 2019 Google LLC. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Setup dependencies for local and cloud deployment."""
15 import setuptools
16
17 # LINT.IfChange
18 TF_VERSION = '1.12.0'
19 # LINT.ThenChange(train_mlengine.sh, start_model_server_mlengine.sh)
20
21 # LINT.IfChange
22 BEAM_VERSION = '2.11.0'
23 # LINT.ThenChange(setup_beam_on_flink.sh)
24
25 if __name__ == '__main__':
26 setuptools.setup(
27 name='tfx_chicago_taxi',
28 version='0.12.0',
29 packages=setuptools.find_packages(),
30 install_requires=[
31 'apache-beam[gcp]==' + BEAM_VERSION,
32 'jupyter==1.0',
33 'numpy==1.14.5',
34 'protobuf==3.6.1',
35 'tensorflow==' + TF_VERSION,
36 'tensorflow-data-validation==0.12.0',
37 'tensorflow-metadata==0.12.1',
38 'tensorflow-model-analysis==0.12.1',
39 'tensorflow-serving-api==1.12.0',
40 'tensorflow-transform==0.12.0',
41 ],
42 python_requires='>=2.7,<3')
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tfx/examples/chicago_taxi/setup.py b/tfx/examples/chicago_taxi/setup.py
--- a/tfx/examples/chicago_taxi/setup.py
+++ b/tfx/examples/chicago_taxi/setup.py
@@ -15,28 +15,29 @@
import setuptools
# LINT.IfChange
-TF_VERSION = '1.12.0'
+TF_VERSION = '1.13.1'
# LINT.ThenChange(train_mlengine.sh, start_model_server_mlengine.sh)
# LINT.IfChange
-BEAM_VERSION = '2.11.0'
+BEAM_VERSION = '2.12.0'
# LINT.ThenChange(setup_beam_on_flink.sh)
if __name__ == '__main__':
setuptools.setup(
name='tfx_chicago_taxi',
- version='0.12.0',
+ version='0.13.0',
packages=setuptools.find_packages(),
install_requires=[
- 'apache-beam[gcp]==' + BEAM_VERSION,
- 'jupyter==1.0',
- 'numpy==1.14.5',
- 'protobuf==3.6.1',
- 'tensorflow==' + TF_VERSION,
- 'tensorflow-data-validation==0.12.0',
- 'tensorflow-metadata==0.12.1',
- 'tensorflow-model-analysis==0.12.1',
- 'tensorflow-serving-api==1.12.0',
- 'tensorflow-transform==0.12.0',
+ 'apache-beam[gcp]>=' + BEAM_VERSION,
+ 'jupyter>=1.0,<2',
+ 'notebook>=5.7.8,<5.8',
+ 'numpy>=1.14.5,<2',
+ 'protobuf>=3.7.0,<3.8.0',
+ 'tensorflow>=' + TF_VERSION,
+ 'tensorflow-data-validation>=0.13.1,<0.14',
+ 'tensorflow-metadata>=0.13.1,<0.14',
+ 'tensorflow-model-analysis>=0.13.2,<0.14',
+ 'tensorflow-serving-api>=1.13.0,<1.14',
+ 'tensorflow-transform>=0.13.0,<0.14',
],
- python_requires='>=2.7,<3')
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4',)
|
{"golden_diff": "diff --git a/tfx/examples/chicago_taxi/setup.py b/tfx/examples/chicago_taxi/setup.py\n--- a/tfx/examples/chicago_taxi/setup.py\n+++ b/tfx/examples/chicago_taxi/setup.py\n@@ -15,28 +15,29 @@\n import setuptools\n \n # LINT.IfChange\n-TF_VERSION = '1.12.0'\n+TF_VERSION = '1.13.1'\n # LINT.ThenChange(train_mlengine.sh, start_model_server_mlengine.sh)\n \n # LINT.IfChange\n-BEAM_VERSION = '2.11.0'\n+BEAM_VERSION = '2.12.0'\n # LINT.ThenChange(setup_beam_on_flink.sh)\n \n if __name__ == '__main__':\n setuptools.setup(\n name='tfx_chicago_taxi',\n- version='0.12.0',\n+ version='0.13.0',\n packages=setuptools.find_packages(),\n install_requires=[\n- 'apache-beam[gcp]==' + BEAM_VERSION,\n- 'jupyter==1.0',\n- 'numpy==1.14.5',\n- 'protobuf==3.6.1',\n- 'tensorflow==' + TF_VERSION,\n- 'tensorflow-data-validation==0.12.0',\n- 'tensorflow-metadata==0.12.1',\n- 'tensorflow-model-analysis==0.12.1',\n- 'tensorflow-serving-api==1.12.0',\n- 'tensorflow-transform==0.12.0',\n+ 'apache-beam[gcp]>=' + BEAM_VERSION,\n+ 'jupyter>=1.0,<2',\n+ 'notebook>=5.7.8,<5.8',\n+ 'numpy>=1.14.5,<2',\n+ 'protobuf>=3.7.0,<3.8.0',\n+ 'tensorflow>=' + TF_VERSION,\n+ 'tensorflow-data-validation>=0.13.1,<0.14',\n+ 'tensorflow-metadata>=0.13.1,<0.14',\n+ 'tensorflow-model-analysis>=0.13.2,<0.14',\n+ 'tensorflow-serving-api>=1.13.0,<1.14',\n+ 'tensorflow-transform>=0.13.0,<0.14',\n ],\n- python_requires='>=2.7,<3')\n+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4',)\n", "issue": "Import errors when trying to run Chicago Taxi on Dataflow\nSimilarly as in issue [#47](https://github.com/tensorflow/tfx/issues/47), I still have a problem with running CTE on Dataflow. When I use the code with no modifications, the error from previous issue persists - it seems that somehow the `try-except` around the imports doesn't do its job.\r\n\r\nWhen I changed the code to include only the relative import in my fork [here](https://github.com/mwalenia/tfx/tree/import-fix), the problem disappeared, but another one manifested.\r\n\r\nThis time, there's a problem with importing `estimator` from tensorflow somewhere in the dependencies. 
Stacktrace:\r\n\r\n```Traceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py\", line 773, in run\r\n self._load_main_session(self.local_staging_directory)\r\n File \"/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py\", line 489, in _load_main_session\r\n pickler.load_session(session_file)\r\n File \"/usr/local/lib/python2.7/dist-packages/apache_beam/internal/pickler.py\", line 269, in load_session\r\n return dill.load_session(file_path)\r\n File \"/usr/local/lib/python2.7/dist-packages/dill/_dill.py\", line 410, in load_session\r\n module = unpickler.load()\r\n File \"/usr/lib/python2.7/pickle.py\", line 864, in load\r\n dispatch[key](self)\r\n File \"/usr/lib/python2.7/pickle.py\", line 1139, in load_reduce\r\n value = func(*args)\r\n File \"/usr/local/lib/python2.7/dist-packages/dill/_dill.py\", line 828, in _import_module\r\n return getattr(__import__(module, None, None, [obj]), obj)\r\n File \"/usr/local/lib/python2.7/dist-packages/trainer/taxi.py\", line 19, in <module>\r\n from tensorflow_transform import coders as tft_coders\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow_transform/__init__.py\", line 19, in <module>\r\n from tensorflow_transform.analyzers import *\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow_transform/analyzers.py\", line 39, in <module>\r\n from tensorflow_transform import tf_utils\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow_transform/tf_utils.py\", line 24, in <module>\r\n from tensorflow.contrib.proto.python.ops import encode_proto_op\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/__init__.py\", line 48, in <module>\r\n from tensorflow.contrib import distribute\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/distribute/__init__.py\", line 34, in <module>\r\n from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/distribute/python/tpu_strategy.py\", line 27, in <module>\r\n from tensorflow.contrib.tpu.python.ops import tpu_ops\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/__init__.py\", line 73, in <module>\r\n from tensorflow.contrib.tpu.python.tpu.keras_support import tpu_model as keras_to_tpu_model\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/keras_support.py\", line 71, in <module>\r\n from tensorflow.python.estimator import model_fn as model_fn_lib\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/__init__.py\", line 25, in <module>\r\n import tensorflow.python.estimator.estimator_lib\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator_lib.py\", line 22, in <module>\r\n from tensorflow.python.estimator.canned.baseline import BaselineClassifier\r\n File \"/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/canned/baseline.py\", line 50, in <module>\r\n from tensorflow.python.estimator import estimator\r\nImportError: cannot import name estimator\r\n```\r\n\r\nIs there anything I can do to fix this? \n", "before_files": [{"content": "# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Setup dependencies for local and cloud deployment.\"\"\"\nimport setuptools\n\n# LINT.IfChange\nTF_VERSION = '1.12.0'\n# LINT.ThenChange(train_mlengine.sh, start_model_server_mlengine.sh)\n\n# LINT.IfChange\nBEAM_VERSION = '2.11.0'\n# LINT.ThenChange(setup_beam_on_flink.sh)\n\nif __name__ == '__main__':\n setuptools.setup(\n name='tfx_chicago_taxi',\n version='0.12.0',\n packages=setuptools.find_packages(),\n install_requires=[\n 'apache-beam[gcp]==' + BEAM_VERSION,\n 'jupyter==1.0',\n 'numpy==1.14.5',\n 'protobuf==3.6.1',\n 'tensorflow==' + TF_VERSION,\n 'tensorflow-data-validation==0.12.0',\n 'tensorflow-metadata==0.12.1',\n 'tensorflow-model-analysis==0.12.1',\n 'tensorflow-serving-api==1.12.0',\n 'tensorflow-transform==0.12.0',\n ],\n python_requires='>=2.7,<3')\n", "path": "tfx/examples/chicago_taxi/setup.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Setup dependencies for local and cloud deployment.\"\"\"\nimport setuptools\n\n# LINT.IfChange\nTF_VERSION = '1.13.1'\n# LINT.ThenChange(train_mlengine.sh, start_model_server_mlengine.sh)\n\n# LINT.IfChange\nBEAM_VERSION = '2.12.0'\n# LINT.ThenChange(setup_beam_on_flink.sh)\n\nif __name__ == '__main__':\n setuptools.setup(\n name='tfx_chicago_taxi',\n version='0.13.0',\n packages=setuptools.find_packages(),\n install_requires=[\n 'apache-beam[gcp]>=' + BEAM_VERSION,\n 'jupyter>=1.0,<2',\n 'notebook>=5.7.8,<5.8',\n 'numpy>=1.14.5,<2',\n 'protobuf>=3.7.0,<3.8.0',\n 'tensorflow>=' + TF_VERSION,\n 'tensorflow-data-validation>=0.13.1,<0.14',\n 'tensorflow-metadata>=0.13.1,<0.14',\n 'tensorflow-model-analysis>=0.13.2,<0.14',\n 'tensorflow-serving-api>=1.13.0,<1.14',\n 'tensorflow-transform>=0.13.0,<0.14',\n ],\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4',)\n", "path": "tfx/examples/chicago_taxi/setup.py"}]}
| 1,700 | 567 |
gh_patches_debug_10568
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-3583
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Buildbot should check for duplicates in addURL
In a step, if we call self.addURL() multiple times with the same URL, Buildbot doesn't check whether the URL has already been added and adds the duplicate URL. Buildbot should check whether the URL is already present before adding it.
This happens when a script parses a lot of output and generates links for various things. We can manually maintain a list ourselves and check for duplicates before calling addURL(), but it feels so basic that Buildbot should be doing this check instead of every caller of addURL() checking for duplicates.
--- END ISSUE ---
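Stripped of the SQLAlchemy plumbing, the check being requested here is small; the golden patch later in this record applies the same idea inside the `thd` function of `addURL`. A minimal standalone sketch of the dedupe logic on the decoded `urls_json` value:

```python
# Hedged sketch of the requested duplicate check: only append the
# (name, url) pair if an identical entry is not already present.
import json


def add_url(urls_json, name, url):
    """Return the updated urls_json, silently ignoring exact duplicates."""
    urls = json.loads(urls_json)
    url_item = dict(name=name, url=url)
    if url_item not in urls:
        urls.append(url_item)
    return json.dumps(urls)


urls_json = '[]'
urls_json = add_url(urls_json, 'log', 'http://example.com/log')
urls_json = add_url(urls_json, 'log', 'http://example.com/log')  # ignored
print(urls_json)  # a single entry survives
```

Persisting the deduplicated list is then the same read-modify-write the method already performs under its DeferredLock.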
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/db/steps.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18
19 import json
20
21 import sqlalchemy as sa
22
23 from twisted.internet import defer
24 from twisted.internet import reactor
25
26 from buildbot.db import base
27 from buildbot.util import epoch2datetime
28
29
30 class StepsConnectorComponent(base.DBConnectorComponent):
31 # Documentation is in developer/db.rst
32 url_lock = None
33
34 def getStep(self, stepid=None, buildid=None, number=None, name=None):
35 tbl = self.db.model.steps
36 if stepid is not None:
37 wc = (tbl.c.id == stepid)
38 else:
39 if buildid is None:
40 return defer.fail(RuntimeError('must supply either stepid or buildid'))
41 if number is not None:
42 wc = (tbl.c.number == number)
43 elif name is not None:
44 wc = (tbl.c.name == name)
45 else:
46 return defer.fail(RuntimeError('must supply either number or name'))
47 wc = wc & (tbl.c.buildid == buildid)
48
49 def thd(conn):
50 q = self.db.model.steps.select(whereclause=wc)
51 res = conn.execute(q)
52 row = res.fetchone()
53
54 rv = None
55 if row:
56 rv = self._stepdictFromRow(row)
57 res.close()
58 return rv
59 return self.db.pool.do(thd)
60
61 def getSteps(self, buildid):
62 def thd(conn):
63 tbl = self.db.model.steps
64 q = tbl.select()
65 q = q.where(tbl.c.buildid == buildid)
66 q = q.order_by(tbl.c.number)
67 res = conn.execute(q)
68 return [self._stepdictFromRow(row) for row in res.fetchall()]
69 return self.db.pool.do(thd)
70
71 def addStep(self, buildid, name, state_string):
72 def thd(conn):
73 tbl = self.db.model.steps
74 # get the highest current number
75 r = conn.execute(sa.select([sa.func.max(tbl.c.number)],
76 whereclause=(tbl.c.buildid == buildid)))
77 number = r.scalar()
78 number = 0 if number is None else number + 1
79
80 # note that there is no chance for a race condition here,
81 # since only one master is inserting steps. If there is a
82 # conflict, then the name is likely already taken.
83 insert_row = dict(buildid=buildid, number=number,
84 started_at=None, complete_at=None,
85 state_string=state_string,
86 urls_json='[]', name=name)
87 try:
88 r = conn.execute(self.db.model.steps.insert(), insert_row)
89 got_id = r.inserted_primary_key[0]
90 except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
91 got_id = None
92
93 if got_id:
94 return (got_id, number, name)
95
96 # we didn't get an id, so calculate a unique name and use that
97 # instead. Because names are truncated at the right to fit in a
98 # 50-character identifier, this isn't a simple query.
99 res = conn.execute(sa.select([tbl.c.name],
100 whereclause=((tbl.c.buildid == buildid))))
101 names = set([row[0] for row in res])
102 num = 1
103 while True:
104 numstr = '_%d' % num
105 newname = name[:50 - len(numstr)] + numstr
106 if newname not in names:
107 break
108 num += 1
109 insert_row['name'] = newname
110 r = conn.execute(self.db.model.steps.insert(), insert_row)
111 got_id = r.inserted_primary_key[0]
112 return (got_id, number, newname)
113 return self.db.pool.do(thd)
114
115 def startStep(self, stepid, _reactor=reactor):
116 started_at = _reactor.seconds()
117
118 def thd(conn):
119 tbl = self.db.model.steps
120 q = tbl.update(whereclause=(tbl.c.id == stepid))
121 conn.execute(q, started_at=started_at)
122 return self.db.pool.do(thd)
123
124 def setStepStateString(self, stepid, state_string):
125 def thd(conn):
126 tbl = self.db.model.steps
127 q = tbl.update(whereclause=(tbl.c.id == stepid))
128 conn.execute(q, state_string=state_string)
129 return self.db.pool.do(thd)
130
131 def addURL(self, stepid, name, url, _racehook=None):
132 # This methods adds an URL to the db
133 # This is a read modify write and thus there is a possibility
134 # that several urls are added at the same time (e.g with a deferredlist
135 # at the end of a step)
136 # this race condition is only inside the same master, as only one master
137 # is supposed to add urls to a buildstep.
138 # so threading.lock is used, as we are in the thread pool
139 if self.url_lock is None:
140 # this runs in reactor thread, so no race here..
141 self.url_lock = defer.DeferredLock()
142
143 def thd(conn):
144
145 tbl = self.db.model.steps
146 wc = (tbl.c.id == stepid)
147 q = sa.select([tbl.c.urls_json],
148 whereclause=wc)
149 res = conn.execute(q)
150 row = res.fetchone()
151 if _racehook is not None:
152 _racehook()
153 urls = json.loads(row.urls_json)
154 urls.append(dict(name=name, url=url))
155
156 q = tbl.update(whereclause=wc)
157 conn.execute(q, urls_json=json.dumps(urls))
158
159 return self.url_lock.run(lambda: self.db.pool.do(thd))
160
161 def finishStep(self, stepid, results, hidden, _reactor=reactor):
162 def thd(conn):
163 tbl = self.db.model.steps
164 q = tbl.update(whereclause=(tbl.c.id == stepid))
165 conn.execute(q,
166 complete_at=_reactor.seconds(),
167 results=results,
168 hidden=1 if hidden else 0)
169 return self.db.pool.do(thd)
170
171 def _stepdictFromRow(self, row):
172 def mkdt(epoch):
173 if epoch:
174 return epoch2datetime(epoch)
175
176 return dict(
177 id=row.id,
178 number=row.number,
179 name=row.name,
180 buildid=row.buildid,
181 started_at=mkdt(row.started_at),
182 complete_at=mkdt(row.complete_at),
183 state_string=row.state_string,
184 results=row.results,
185 urls=json.loads(row.urls_json),
186 hidden=bool(row.hidden))
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/master/buildbot/db/steps.py b/master/buildbot/db/steps.py
--- a/master/buildbot/db/steps.py
+++ b/master/buildbot/db/steps.py
@@ -151,10 +151,13 @@
if _racehook is not None:
_racehook()
urls = json.loads(row.urls_json)
- urls.append(dict(name=name, url=url))
- q = tbl.update(whereclause=wc)
- conn.execute(q, urls_json=json.dumps(urls))
+ url_item = dict(name=name, url=url)
+
+ if url_item not in urls:
+ urls.append(url_item)
+ q = tbl.update(whereclause=wc)
+ conn.execute(q, urls_json=json.dumps(urls))
return self.url_lock.run(lambda: self.db.pool.do(thd))
|
{"golden_diff": "diff --git a/master/buildbot/db/steps.py b/master/buildbot/db/steps.py\n--- a/master/buildbot/db/steps.py\n+++ b/master/buildbot/db/steps.py\n@@ -151,10 +151,13 @@\n if _racehook is not None:\n _racehook()\n urls = json.loads(row.urls_json)\n- urls.append(dict(name=name, url=url))\n \n- q = tbl.update(whereclause=wc)\n- conn.execute(q, urls_json=json.dumps(urls))\n+ url_item = dict(name=name, url=url)\n+\n+ if url_item not in urls:\n+ urls.append(url_item)\n+ q = tbl.update(whereclause=wc)\n+ conn.execute(q, urls_json=json.dumps(urls))\n \n return self.url_lock.run(lambda: self.db.pool.do(thd))\n", "issue": "Buildbot should check for duplicates in addURL \nIn a step if we call self.addURL() multiple times with same URL, buildbot doesn't check if URL is already added, and adds the duplicate URL. Buildbot should check if the URL is already added before adding the URL.\r\n\r\nThis happens when a script parses a lot of output and generate links for various things. We can manually maintain a list ourself and check for duplicates before calling addURL(), but it feels so basic that buildbot should be doing this check instead of callers of addURL() checking for duplicates.\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\n\nimport sqlalchemy as sa\n\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\n\nfrom buildbot.db import base\nfrom buildbot.util import epoch2datetime\n\n\nclass StepsConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/db.rst\n url_lock = None\n\n def getStep(self, stepid=None, buildid=None, number=None, name=None):\n tbl = self.db.model.steps\n if stepid is not None:\n wc = (tbl.c.id == stepid)\n else:\n if buildid is None:\n return defer.fail(RuntimeError('must supply either stepid or buildid'))\n if number is not None:\n wc = (tbl.c.number == number)\n elif name is not None:\n wc = (tbl.c.name == name)\n else:\n return defer.fail(RuntimeError('must supply either number or name'))\n wc = wc & (tbl.c.buildid == buildid)\n\n def thd(conn):\n q = self.db.model.steps.select(whereclause=wc)\n res = conn.execute(q)\n row = res.fetchone()\n\n rv = None\n if row:\n rv = self._stepdictFromRow(row)\n res.close()\n return rv\n return self.db.pool.do(thd)\n\n def getSteps(self, buildid):\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.select()\n q = q.where(tbl.c.buildid == buildid)\n q = q.order_by(tbl.c.number)\n res = conn.execute(q)\n return [self._stepdictFromRow(row) for row in res.fetchall()]\n return self.db.pool.do(thd)\n\n def addStep(self, buildid, name, state_string):\n def thd(conn):\n tbl = self.db.model.steps\n # get the highest current number\n r = conn.execute(sa.select([sa.func.max(tbl.c.number)],\n 
whereclause=(tbl.c.buildid == buildid)))\n number = r.scalar()\n number = 0 if number is None else number + 1\n\n # note that there is no chance for a race condition here,\n # since only one master is inserting steps. If there is a\n # conflict, then the name is likely already taken.\n insert_row = dict(buildid=buildid, number=number,\n started_at=None, complete_at=None,\n state_string=state_string,\n urls_json='[]', name=name)\n try:\n r = conn.execute(self.db.model.steps.insert(), insert_row)\n got_id = r.inserted_primary_key[0]\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n got_id = None\n\n if got_id:\n return (got_id, number, name)\n\n # we didn't get an id, so calculate a unique name and use that\n # instead. Because names are truncated at the right to fit in a\n # 50-character identifier, this isn't a simple query.\n res = conn.execute(sa.select([tbl.c.name],\n whereclause=((tbl.c.buildid == buildid))))\n names = set([row[0] for row in res])\n num = 1\n while True:\n numstr = '_%d' % num\n newname = name[:50 - len(numstr)] + numstr\n if newname not in names:\n break\n num += 1\n insert_row['name'] = newname\n r = conn.execute(self.db.model.steps.insert(), insert_row)\n got_id = r.inserted_primary_key[0]\n return (got_id, number, newname)\n return self.db.pool.do(thd)\n\n def startStep(self, stepid, _reactor=reactor):\n started_at = _reactor.seconds()\n\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.update(whereclause=(tbl.c.id == stepid))\n conn.execute(q, started_at=started_at)\n return self.db.pool.do(thd)\n\n def setStepStateString(self, stepid, state_string):\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.update(whereclause=(tbl.c.id == stepid))\n conn.execute(q, state_string=state_string)\n return self.db.pool.do(thd)\n\n def addURL(self, stepid, name, url, _racehook=None):\n # This methods adds an URL to the db\n # This is a read modify write and thus there is a possibility\n # that several urls are added at the same time (e.g with a deferredlist\n # at the end of a step)\n # this race condition is only inside the same master, as only one master\n # is supposed to add urls to a buildstep.\n # so threading.lock is used, as we are in the thread pool\n if self.url_lock is None:\n # this runs in reactor thread, so no race here..\n self.url_lock = defer.DeferredLock()\n\n def thd(conn):\n\n tbl = self.db.model.steps\n wc = (tbl.c.id == stepid)\n q = sa.select([tbl.c.urls_json],\n whereclause=wc)\n res = conn.execute(q)\n row = res.fetchone()\n if _racehook is not None:\n _racehook()\n urls = json.loads(row.urls_json)\n urls.append(dict(name=name, url=url))\n\n q = tbl.update(whereclause=wc)\n conn.execute(q, urls_json=json.dumps(urls))\n\n return self.url_lock.run(lambda: self.db.pool.do(thd))\n\n def finishStep(self, stepid, results, hidden, _reactor=reactor):\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.update(whereclause=(tbl.c.id == stepid))\n conn.execute(q,\n complete_at=_reactor.seconds(),\n results=results,\n hidden=1 if hidden else 0)\n return self.db.pool.do(thd)\n\n def _stepdictFromRow(self, row):\n def mkdt(epoch):\n if epoch:\n return epoch2datetime(epoch)\n\n return dict(\n id=row.id,\n number=row.number,\n name=row.name,\n buildid=row.buildid,\n started_at=mkdt(row.started_at),\n complete_at=mkdt(row.complete_at),\n state_string=row.state_string,\n results=row.results,\n urls=json.loads(row.urls_json),\n hidden=bool(row.hidden))\n", "path": "master/buildbot/db/steps.py"}], "after_files": [{"content": "# This file is part of 
Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\n\nimport sqlalchemy as sa\n\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\n\nfrom buildbot.db import base\nfrom buildbot.util import epoch2datetime\n\n\nclass StepsConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/db.rst\n url_lock = None\n\n def getStep(self, stepid=None, buildid=None, number=None, name=None):\n tbl = self.db.model.steps\n if stepid is not None:\n wc = (tbl.c.id == stepid)\n else:\n if buildid is None:\n return defer.fail(RuntimeError('must supply either stepid or buildid'))\n if number is not None:\n wc = (tbl.c.number == number)\n elif name is not None:\n wc = (tbl.c.name == name)\n else:\n return defer.fail(RuntimeError('must supply either number or name'))\n wc = wc & (tbl.c.buildid == buildid)\n\n def thd(conn):\n q = self.db.model.steps.select(whereclause=wc)\n res = conn.execute(q)\n row = res.fetchone()\n\n rv = None\n if row:\n rv = self._stepdictFromRow(row)\n res.close()\n return rv\n return self.db.pool.do(thd)\n\n def getSteps(self, buildid):\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.select()\n q = q.where(tbl.c.buildid == buildid)\n q = q.order_by(tbl.c.number)\n res = conn.execute(q)\n return [self._stepdictFromRow(row) for row in res.fetchall()]\n return self.db.pool.do(thd)\n\n def addStep(self, buildid, name, state_string):\n def thd(conn):\n tbl = self.db.model.steps\n # get the highest current number\n r = conn.execute(sa.select([sa.func.max(tbl.c.number)],\n whereclause=(tbl.c.buildid == buildid)))\n number = r.scalar()\n number = 0 if number is None else number + 1\n\n # note that there is no chance for a race condition here,\n # since only one master is inserting steps. If there is a\n # conflict, then the name is likely already taken.\n insert_row = dict(buildid=buildid, number=number,\n started_at=None, complete_at=None,\n state_string=state_string,\n urls_json='[]', name=name)\n try:\n r = conn.execute(self.db.model.steps.insert(), insert_row)\n got_id = r.inserted_primary_key[0]\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n got_id = None\n\n if got_id:\n return (got_id, number, name)\n\n # we didn't get an id, so calculate a unique name and use that\n # instead. 
Because names are truncated at the right to fit in a\n # 50-character identifier, this isn't a simple query.\n res = conn.execute(sa.select([tbl.c.name],\n whereclause=((tbl.c.buildid == buildid))))\n names = set([row[0] for row in res])\n num = 1\n while True:\n numstr = '_%d' % num\n newname = name[:50 - len(numstr)] + numstr\n if newname not in names:\n break\n num += 1\n insert_row['name'] = newname\n r = conn.execute(self.db.model.steps.insert(), insert_row)\n got_id = r.inserted_primary_key[0]\n return (got_id, number, newname)\n return self.db.pool.do(thd)\n\n def startStep(self, stepid, _reactor=reactor):\n started_at = _reactor.seconds()\n\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.update(whereclause=(tbl.c.id == stepid))\n conn.execute(q, started_at=started_at)\n return self.db.pool.do(thd)\n\n def setStepStateString(self, stepid, state_string):\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.update(whereclause=(tbl.c.id == stepid))\n conn.execute(q, state_string=state_string)\n return self.db.pool.do(thd)\n\n def addURL(self, stepid, name, url, _racehook=None):\n # This methods adds an URL to the db\n # This is a read modify write and thus there is a possibility\n # that several urls are added at the same time (e.g with a deferredlist\n # at the end of a step)\n # this race condition is only inside the same master, as only one master\n # is supposed to add urls to a buildstep.\n # so threading.lock is used, as we are in the thread pool\n if self.url_lock is None:\n # this runs in reactor thread, so no race here..\n self.url_lock = defer.DeferredLock()\n\n def thd(conn):\n\n tbl = self.db.model.steps\n wc = (tbl.c.id == stepid)\n q = sa.select([tbl.c.urls_json],\n whereclause=wc)\n res = conn.execute(q)\n row = res.fetchone()\n if _racehook is not None:\n _racehook()\n urls = json.loads(row.urls_json)\n\n url_item = dict(name=name, url=url)\n\n if url_item not in urls:\n urls.append(url_item)\n q = tbl.update(whereclause=wc)\n conn.execute(q, urls_json=json.dumps(urls))\n\n return self.url_lock.run(lambda: self.db.pool.do(thd))\n\n def finishStep(self, stepid, results, hidden, _reactor=reactor):\n def thd(conn):\n tbl = self.db.model.steps\n q = tbl.update(whereclause=(tbl.c.id == stepid))\n conn.execute(q,\n complete_at=_reactor.seconds(),\n results=results,\n hidden=1 if hidden else 0)\n return self.db.pool.do(thd)\n\n def _stepdictFromRow(self, row):\n def mkdt(epoch):\n if epoch:\n return epoch2datetime(epoch)\n\n return dict(\n id=row.id,\n number=row.number,\n name=row.name,\n buildid=row.buildid,\n started_at=mkdt(row.started_at),\n complete_at=mkdt(row.complete_at),\n state_string=row.state_string,\n results=row.results,\n urls=json.loads(row.urls_json),\n hidden=bool(row.hidden))\n", "path": "master/buildbot/db/steps.py"}]}
| 2,420 | 182 |
gh_patches_debug_30308
|
rasdani/github-patches
|
git_diff
|
microsoft__Qcodes-450
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unused class FileWrapper
In the io manager module there is a FileWrapper class that seems to be never used anywhere?
Aside from the dubious utility of such a class (maybe if documented one could figure out), maybe we should just remove it?
@QCoDeS/core?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/data/io.py`
Content:
```
1 """
2 IO managers for QCodes.
3
4 IO managers wrap whatever physical storage layer the user wants to use
5 in an interface mimicking the built-in <open> context manager, with
6 some restrictions to minimize the overhead in creating new IO managers.
7
8 The main thing these managers need to implement is the open context manager:
9
10 - Only the context manager needs to be implemented, not separate
11 open function and close methods.
12
13 - open takes the standard parameters:
14
15 - filename: (string)
16 - mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are
17 expected to be implemented. As with normal file objects, the only
18 difference between write and append is that write empties the file
19 before adding new data, and append leaves the existing contents in
20 place but starts writing at the end.
21 - encoding: If a special output encoding is desired. i.e. 'utf8
22
23 - the file-like object returned should implement a minimal set of operations.
24
25 In read mode:
26 - read([size]): read to the end or at most size bytes into a string
27 - readline([size]): read until a newline or up to size bytes, into a string
28 - iter(): usually return self, but can be any iterator over lines
29 - next(): assuming iter() returns self, this yields the next line.
30
31 (note: iter and next can be constructed automatically by FileWrapper
32 if you implement readline.)
33
34 In write or append mode:
35 - write(s): add string s to the end of the file.
36 - writelines(seq): add a sequence of strings (can be constructed
37 automatically if you use FileWrapper)
38
39 IO managers should also implement:
40
41 - a join method, ala os.path.join(\*args).
42 - a list method, that returns all objects matching location
43 - a remove method, ala os.remove(path) except that it will remove directories
44 as well as files, since we're allowing "locations" to be directories
45 or files.
46 """
47
48 from contextlib import contextmanager
49 import os
50 import re
51 import shutil
52 from fnmatch import fnmatch
53
54 ALLOWED_OPEN_MODES = ('r', 'w', 'a')
55
56
57 class DiskIO:
58
59 """
60 Simple IO object to wrap disk operations with a custom base location.
61
62 Also accepts both forward and backward slashes at any point, and
63 normalizes both to the OS we are currently on.
64
65 Args:
66 base_location (str): a path to the root data folder.
67 Converted to an absolute path immediately, so even if you supply a
68 relative path, later changes to the OS working directory will not
69 affect data paths.
70 """
71
72 def __init__(self, base_location):
73 if base_location is None:
74 self.base_location = None
75 else:
76 base_location = self._normalize_slashes(base_location)
77 self.base_location = os.path.abspath(base_location)
78
79 @contextmanager
80 def open(self, filename, mode, encoding=None):
81 """
82 Mimic the interface of the built in open context manager.
83
84 Args:
85 filename (str): path relative to base_location.
86
87 mode (str): 'r' (read), 'w' (write), or 'a' (append).
88 Other open modes are not supported because we don't want
89 to force all IO managers to support others.
90
91 Returns:
92 context manager yielding the open file
93 """
94 if mode not in ALLOWED_OPEN_MODES:
95 raise ValueError('mode {} not allowed in IO managers'.format(mode))
96
97 filepath = self.to_path(filename)
98
99 # make directories if needed
100 dirpath = os.path.dirname(filepath)
101 if not os.path.exists(dirpath):
102 os.makedirs(dirpath)
103
104 # normally we'd construct this context manager with try/finally, but
105 # here we already have a context manager for open so we just wrap it
106 with open(filepath, mode, encoding=encoding) as f:
107 yield f
108
109 def _normalize_slashes(self, location):
110 # note that this is NOT os.path.join - the difference is os.path.join
111 # discards empty strings, so if you use it on a re.split absolute
112 # path you will get a relative path!
113 return os.sep.join(re.split('[\\\\/]', location))
114
115 def to_path(self, location):
116 """
117 Convert a location string into a path on the local file system.
118
119 For DiskIO this just fixes slashes and prepends the base location,
120 doing nothing active with the file. But for other io managers that
121 refer to remote storage, this method may actually fetch the file and
122 put it at a temporary local path.
123
124 Args:
125 location (str): A location string for a complete dataset or
126 a file within it.
127
128 Returns:
129 path (str): The path on disk to which this location maps.
130 """
131 location = self._normalize_slashes(location)
132 if self.base_location:
133 return os.path.join(self.base_location, location)
134 else:
135 return location
136
137 def to_location(self, path):
138 """
139 Convert a local filesystem path into a location string.
140
141 Args:
142 path (str): a path on the local file system.
143
144 Returns:
145 location (str): the location string corresponding to this path.
146 """
147 if self.base_location:
148 return os.path.relpath(path, self.base_location)
149 else:
150 return path
151
152 def __repr__(self):
153 """Show the base location in the repr."""
154 return '<DiskIO, base_location={}>'.format(repr(self.base_location))
155
156 def join(self, *args):
157 """Context-dependent os.path.join for this io manager."""
158 return os.path.join(*list(map(self._normalize_slashes, args)))
159
160 def isfile(self, location):
161 """Check whether this location matches a file."""
162 path = self.to_path(location)
163 return os.path.isfile(path)
164
165 def list(self, location, maxdepth=1, include_dirs=False):
166 """
167 Return all files that match location.
168
169 This is either files whose names match up to an arbitrary extension,
170 or any files within an exactly matching directory name.
171
172 Args:
173 location (str): the location to match.
174 May contain the usual path wildcards * and ?
175
176 maxdepth (int, optional): maximum levels of directory nesting to
177 recurse into looking for files. Default 1.
178
179 include_dirs (bool, optional): whether to allow directories in
180 the results or just files. Default False.
181
182 Returns:
183 A list of matching files and/or directories, as locations
184 relative to our base_location.
185 """
186 location = self._normalize_slashes(location)
187 search_dir, pattern = os.path.split(location)
188 path = self.to_path(search_dir)
189
190 if not os.path.isdir(path):
191 return []
192
193 matches = [fn for fn in os.listdir(path) if fnmatch(fn, pattern + '*')]
194 out = []
195
196 for match in matches:
197 matchpath = self.join(path, match)
198 if os.path.isdir(matchpath) and fnmatch(match, pattern):
199 if maxdepth > 0:
200 # exact directory match - walk down to maxdepth
201 for root, dirs, files in os.walk(matchpath, topdown=True):
202 depth = root[len(path):].count(os.path.sep)
203 if depth == maxdepth:
204 dirs[:] = [] # don't recurse any further
205
206 for fn in files + (dirs if include_dirs else []):
207 out.append(self.to_location(self.join(root, fn)))
208
209 elif include_dirs:
210 out.append(self.join(search_dir, match))
211
212 elif (os.path.isfile(matchpath) and
213 (fnmatch(match, pattern) or
214 fnmatch(os.path.splitext(match)[0], pattern))):
215 # exact filename match, or match up to an extension
216 # note that we need fnmatch(match, pattern) in addition to the
217 # splitext test to cover the case of the base filename itself
218 # containing a dot.
219 out.append(self.join(search_dir, match))
220
221 return out
222
223 def remove(self, filename):
224 """Delete a file or folder and prune the directory tree."""
225 path = self.to_path(filename)
226 if(os.path.isdir(path)):
227 shutil.rmtree(path)
228 else:
229 os.remove(path)
230
231 filepath = os.path.split(path)[0]
232 try:
233 os.removedirs(filepath)
234 except OSError:
235 # directory was not empty - good that we're not removing it!
236 pass
237
238 def remove_all(self, location):
239 """
240 Delete all files/directories in the dataset at this location.
241
242 Afterward prunes the directory tree.
243 """
244 for fn in self.list(location):
245 self.remove(fn)
246
247
248 class FileWrapper:
249 def read(self, size=None):
250 raise NotImplementedError
251
252 def readline(self, size=None):
253 raise NotImplementedError
254
255 def __iter__(self):
256 return self
257
258 def __next__(self):
259 line = self.readline()
260 if line:
261 return line
262 else:
263 raise StopIteration
264
265 def write(self, s):
266 raise NotImplementedError
267
268 def writelines(self, seq):
269 for s in seq:
270 self.write(s)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qcodes/data/io.py b/qcodes/data/io.py
--- a/qcodes/data/io.py
+++ b/qcodes/data/io.py
@@ -28,13 +28,9 @@
- iter(): usually return self, but can be any iterator over lines
- next(): assuming iter() returns self, this yields the next line.
- (note: iter and next can be constructed automatically by FileWrapper
- if you implement readline.)
-
In write or append mode:
- write(s): add string s to the end of the file.
- - writelines(seq): add a sequence of strings (can be constructed
- automatically if you use FileWrapper)
+ - writelines(seq): add a sequence of strings
IO managers should also implement:
@@ -223,7 +219,7 @@
def remove(self, filename):
"""Delete a file or folder and prune the directory tree."""
path = self.to_path(filename)
- if(os.path.isdir(path)):
+ if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
@@ -243,28 +239,3 @@
"""
for fn in self.list(location):
self.remove(fn)
-
-
-class FileWrapper:
- def read(self, size=None):
- raise NotImplementedError
-
- def readline(self, size=None):
- raise NotImplementedError
-
- def __iter__(self):
- return self
-
- def __next__(self):
- line = self.readline()
- if line:
- return line
- else:
- raise StopIteration
-
- def write(self, s):
- raise NotImplementedError
-
- def writelines(self, seq):
- for s in seq:
- self.write(s)
|
{"golden_diff": "diff --git a/qcodes/data/io.py b/qcodes/data/io.py\n--- a/qcodes/data/io.py\n+++ b/qcodes/data/io.py\n@@ -28,13 +28,9 @@\n - iter(): usually return self, but can be any iterator over lines\n - next(): assuming iter() returns self, this yields the next line.\n \n- (note: iter and next can be constructed automatically by FileWrapper\n- if you implement readline.)\n-\n In write or append mode:\n - write(s): add string s to the end of the file.\n- - writelines(seq): add a sequence of strings (can be constructed\n- automatically if you use FileWrapper)\n+ - writelines(seq): add a sequence of strings\n \n IO managers should also implement:\n \n@@ -223,7 +219,7 @@\n def remove(self, filename):\n \"\"\"Delete a file or folder and prune the directory tree.\"\"\"\n path = self.to_path(filename)\n- if(os.path.isdir(path)):\n+ if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n@@ -243,28 +239,3 @@\n \"\"\"\n for fn in self.list(location):\n self.remove(fn)\n-\n-\n-class FileWrapper:\n- def read(self, size=None):\n- raise NotImplementedError\n-\n- def readline(self, size=None):\n- raise NotImplementedError\n-\n- def __iter__(self):\n- return self\n-\n- def __next__(self):\n- line = self.readline()\n- if line:\n- return line\n- else:\n- raise StopIteration\n-\n- def write(self, s):\n- raise NotImplementedError\n-\n- def writelines(self, seq):\n- for s in seq:\n- self.write(s)\n", "issue": "Unused class FileWrapper\nIn the io manager module there is a FileWrapper class that seems to be never used anywhere?\r\nAside from the dubious utility of such a class (maybe if documented one could figure out) , maybe we should just remove it ?\r\n\r\n@QCoDeS/core ? \r\n\n", "before_files": [{"content": "\"\"\"\nIO managers for QCodes.\n\nIO managers wrap whatever physical storage layer the user wants to use\nin an interface mimicking the built-in <open> context manager, with\nsome restrictions to minimize the overhead in creating new IO managers.\n\nThe main thing these managers need to implement is the open context manager:\n\n- Only the context manager needs to be implemented, not separate\n open function and close methods.\n\n- open takes the standard parameters:\n\n - filename: (string)\n - mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are\n expected to be implemented. As with normal file objects, the only\n difference between write and append is that write empties the file\n before adding new data, and append leaves the existing contents in\n place but starts writing at the end.\n - encoding: If a special output encoding is desired. i.e. 
'utf8\n\n- the file-like object returned should implement a minimal set of operations.\n\n In read mode:\n - read([size]): read to the end or at most size bytes into a string\n - readline([size]): read until a newline or up to size bytes, into a string\n - iter(): usually return self, but can be any iterator over lines\n - next(): assuming iter() returns self, this yields the next line.\n\n (note: iter and next can be constructed automatically by FileWrapper\n if you implement readline.)\n\n In write or append mode:\n - write(s): add string s to the end of the file.\n - writelines(seq): add a sequence of strings (can be constructed\n automatically if you use FileWrapper)\n\nIO managers should also implement:\n\n- a join method, ala os.path.join(\\*args).\n- a list method, that returns all objects matching location\n- a remove method, ala os.remove(path) except that it will remove directories\n as well as files, since we're allowing \"locations\" to be directories\n or files.\n\"\"\"\n\nfrom contextlib import contextmanager\nimport os\nimport re\nimport shutil\nfrom fnmatch import fnmatch\n\nALLOWED_OPEN_MODES = ('r', 'w', 'a')\n\n\nclass DiskIO:\n\n \"\"\"\n Simple IO object to wrap disk operations with a custom base location.\n\n Also accepts both forward and backward slashes at any point, and\n normalizes both to the OS we are currently on.\n\n Args:\n base_location (str): a path to the root data folder.\n Converted to an absolute path immediately, so even if you supply a\n relative path, later changes to the OS working directory will not\n affect data paths.\n \"\"\"\n\n def __init__(self, base_location):\n if base_location is None:\n self.base_location = None\n else:\n base_location = self._normalize_slashes(base_location)\n self.base_location = os.path.abspath(base_location)\n\n @contextmanager\n def open(self, filename, mode, encoding=None):\n \"\"\"\n Mimic the interface of the built in open context manager.\n\n Args:\n filename (str): path relative to base_location.\n\n mode (str): 'r' (read), 'w' (write), or 'a' (append).\n Other open modes are not supported because we don't want\n to force all IO managers to support others.\n\n Returns:\n context manager yielding the open file\n \"\"\"\n if mode not in ALLOWED_OPEN_MODES:\n raise ValueError('mode {} not allowed in IO managers'.format(mode))\n\n filepath = self.to_path(filename)\n\n # make directories if needed\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # normally we'd construct this context manager with try/finally, but\n # here we already have a context manager for open so we just wrap it\n with open(filepath, mode, encoding=encoding) as f:\n yield f\n\n def _normalize_slashes(self, location):\n # note that this is NOT os.path.join - the difference is os.path.join\n # discards empty strings, so if you use it on a re.split absolute\n # path you will get a relative path!\n return os.sep.join(re.split('[\\\\\\\\/]', location))\n\n def to_path(self, location):\n \"\"\"\n Convert a location string into a path on the local file system.\n\n For DiskIO this just fixes slashes and prepends the base location,\n doing nothing active with the file. 
But for other io managers that\n refer to remote storage, this method may actually fetch the file and\n put it at a temporary local path.\n\n Args:\n location (str): A location string for a complete dataset or\n a file within it.\n\n Returns:\n path (str): The path on disk to which this location maps.\n \"\"\"\n location = self._normalize_slashes(location)\n if self.base_location:\n return os.path.join(self.base_location, location)\n else:\n return location\n\n def to_location(self, path):\n \"\"\"\n Convert a local filesystem path into a location string.\n\n Args:\n path (str): a path on the local file system.\n\n Returns:\n location (str): the location string corresponding to this path.\n \"\"\"\n if self.base_location:\n return os.path.relpath(path, self.base_location)\n else:\n return path\n\n def __repr__(self):\n \"\"\"Show the base location in the repr.\"\"\"\n return '<DiskIO, base_location={}>'.format(repr(self.base_location))\n\n def join(self, *args):\n \"\"\"Context-dependent os.path.join for this io manager.\"\"\"\n return os.path.join(*list(map(self._normalize_slashes, args)))\n\n def isfile(self, location):\n \"\"\"Check whether this location matches a file.\"\"\"\n path = self.to_path(location)\n return os.path.isfile(path)\n\n def list(self, location, maxdepth=1, include_dirs=False):\n \"\"\"\n Return all files that match location.\n\n This is either files whose names match up to an arbitrary extension,\n or any files within an exactly matching directory name.\n\n Args:\n location (str): the location to match.\n May contain the usual path wildcards * and ?\n\n maxdepth (int, optional): maximum levels of directory nesting to\n recurse into looking for files. Default 1.\n\n include_dirs (bool, optional): whether to allow directories in\n the results or just files. 
Default False.\n\n Returns:\n A list of matching files and/or directories, as locations\n relative to our base_location.\n \"\"\"\n location = self._normalize_slashes(location)\n search_dir, pattern = os.path.split(location)\n path = self.to_path(search_dir)\n\n if not os.path.isdir(path):\n return []\n\n matches = [fn for fn in os.listdir(path) if fnmatch(fn, pattern + '*')]\n out = []\n\n for match in matches:\n matchpath = self.join(path, match)\n if os.path.isdir(matchpath) and fnmatch(match, pattern):\n if maxdepth > 0:\n # exact directory match - walk down to maxdepth\n for root, dirs, files in os.walk(matchpath, topdown=True):\n depth = root[len(path):].count(os.path.sep)\n if depth == maxdepth:\n dirs[:] = [] # don't recurse any further\n\n for fn in files + (dirs if include_dirs else []):\n out.append(self.to_location(self.join(root, fn)))\n\n elif include_dirs:\n out.append(self.join(search_dir, match))\n\n elif (os.path.isfile(matchpath) and\n (fnmatch(match, pattern) or\n fnmatch(os.path.splitext(match)[0], pattern))):\n # exact filename match, or match up to an extension\n # note that we need fnmatch(match, pattern) in addition to the\n # splitext test to cover the case of the base filename itself\n # containing a dot.\n out.append(self.join(search_dir, match))\n\n return out\n\n def remove(self, filename):\n \"\"\"Delete a file or folder and prune the directory tree.\"\"\"\n path = self.to_path(filename)\n if(os.path.isdir(path)):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n filepath = os.path.split(path)[0]\n try:\n os.removedirs(filepath)\n except OSError:\n # directory was not empty - good that we're not removing it!\n pass\n\n def remove_all(self, location):\n \"\"\"\n Delete all files/directories in the dataset at this location.\n\n Afterward prunes the directory tree.\n \"\"\"\n for fn in self.list(location):\n self.remove(fn)\n\n\nclass FileWrapper:\n def read(self, size=None):\n raise NotImplementedError\n\n def readline(self, size=None):\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n line = self.readline()\n if line:\n return line\n else:\n raise StopIteration\n\n def write(self, s):\n raise NotImplementedError\n\n def writelines(self, seq):\n for s in seq:\n self.write(s)\n", "path": "qcodes/data/io.py"}], "after_files": [{"content": "\"\"\"\nIO managers for QCodes.\n\nIO managers wrap whatever physical storage layer the user wants to use\nin an interface mimicking the built-in <open> context manager, with\nsome restrictions to minimize the overhead in creating new IO managers.\n\nThe main thing these managers need to implement is the open context manager:\n\n- Only the context manager needs to be implemented, not separate\n open function and close methods.\n\n- open takes the standard parameters:\n\n - filename: (string)\n - mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are\n expected to be implemented. As with normal file objects, the only\n difference between write and append is that write empties the file\n before adding new data, and append leaves the existing contents in\n place but starts writing at the end.\n - encoding: If a special output encoding is desired. i.e. 
'utf8\n\n- the file-like object returned should implement a minimal set of operations.\n\n In read mode:\n - read([size]): read to the end or at most size bytes into a string\n - readline([size]): read until a newline or up to size bytes, into a string\n - iter(): usually return self, but can be any iterator over lines\n - next(): assuming iter() returns self, this yields the next line.\n\n In write or append mode:\n - write(s): add string s to the end of the file.\n - writelines(seq): add a sequence of strings\n\nIO managers should also implement:\n\n- a join method, ala os.path.join(\\*args).\n- a list method, that returns all objects matching location\n- a remove method, ala os.remove(path) except that it will remove directories\n as well as files, since we're allowing \"locations\" to be directories\n or files.\n\"\"\"\n\nfrom contextlib import contextmanager\nimport os\nimport re\nimport shutil\nfrom fnmatch import fnmatch\n\nALLOWED_OPEN_MODES = ('r', 'w', 'a')\n\n\nclass DiskIO:\n\n \"\"\"\n Simple IO object to wrap disk operations with a custom base location.\n\n Also accepts both forward and backward slashes at any point, and\n normalizes both to the OS we are currently on.\n\n Args:\n base_location (str): a path to the root data folder.\n Converted to an absolute path immediately, so even if you supply a\n relative path, later changes to the OS working directory will not\n affect data paths.\n \"\"\"\n\n def __init__(self, base_location):\n if base_location is None:\n self.base_location = None\n else:\n base_location = self._normalize_slashes(base_location)\n self.base_location = os.path.abspath(base_location)\n\n @contextmanager\n def open(self, filename, mode, encoding=None):\n \"\"\"\n Mimic the interface of the built in open context manager.\n\n Args:\n filename (str): path relative to base_location.\n\n mode (str): 'r' (read), 'w' (write), or 'a' (append).\n Other open modes are not supported because we don't want\n to force all IO managers to support others.\n\n Returns:\n context manager yielding the open file\n \"\"\"\n if mode not in ALLOWED_OPEN_MODES:\n raise ValueError('mode {} not allowed in IO managers'.format(mode))\n\n filepath = self.to_path(filename)\n\n # make directories if needed\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # normally we'd construct this context manager with try/finally, but\n # here we already have a context manager for open so we just wrap it\n with open(filepath, mode, encoding=encoding) as f:\n yield f\n\n def _normalize_slashes(self, location):\n # note that this is NOT os.path.join - the difference is os.path.join\n # discards empty strings, so if you use it on a re.split absolute\n # path you will get a relative path!\n return os.sep.join(re.split('[\\\\\\\\/]', location))\n\n def to_path(self, location):\n \"\"\"\n Convert a location string into a path on the local file system.\n\n For DiskIO this just fixes slashes and prepends the base location,\n doing nothing active with the file. 
But for other io managers that\n refer to remote storage, this method may actually fetch the file and\n put it at a temporary local path.\n\n Args:\n location (str): A location string for a complete dataset or\n a file within it.\n\n Returns:\n path (str): The path on disk to which this location maps.\n \"\"\"\n location = self._normalize_slashes(location)\n if self.base_location:\n return os.path.join(self.base_location, location)\n else:\n return location\n\n def to_location(self, path):\n \"\"\"\n Convert a local filesystem path into a location string.\n\n Args:\n path (str): a path on the local file system.\n\n Returns:\n location (str): the location string corresponding to this path.\n \"\"\"\n if self.base_location:\n return os.path.relpath(path, self.base_location)\n else:\n return path\n\n def __repr__(self):\n \"\"\"Show the base location in the repr.\"\"\"\n return '<DiskIO, base_location={}>'.format(repr(self.base_location))\n\n def join(self, *args):\n \"\"\"Context-dependent os.path.join for this io manager.\"\"\"\n return os.path.join(*list(map(self._normalize_slashes, args)))\n\n def isfile(self, location):\n \"\"\"Check whether this location matches a file.\"\"\"\n path = self.to_path(location)\n return os.path.isfile(path)\n\n def list(self, location, maxdepth=1, include_dirs=False):\n \"\"\"\n Return all files that match location.\n\n This is either files whose names match up to an arbitrary extension,\n or any files within an exactly matching directory name.\n\n Args:\n location (str): the location to match.\n May contain the usual path wildcards * and ?\n\n maxdepth (int, optional): maximum levels of directory nesting to\n recurse into looking for files. Default 1.\n\n include_dirs (bool, optional): whether to allow directories in\n the results or just files. 
Default False.\n\n Returns:\n A list of matching files and/or directories, as locations\n relative to our base_location.\n \"\"\"\n location = self._normalize_slashes(location)\n search_dir, pattern = os.path.split(location)\n path = self.to_path(search_dir)\n\n if not os.path.isdir(path):\n return []\n\n matches = [fn for fn in os.listdir(path) if fnmatch(fn, pattern + '*')]\n out = []\n\n for match in matches:\n matchpath = self.join(path, match)\n if os.path.isdir(matchpath) and fnmatch(match, pattern):\n if maxdepth > 0:\n # exact directory match - walk down to maxdepth\n for root, dirs, files in os.walk(matchpath, topdown=True):\n depth = root[len(path):].count(os.path.sep)\n if depth == maxdepth:\n dirs[:] = [] # don't recurse any further\n\n for fn in files + (dirs if include_dirs else []):\n out.append(self.to_location(self.join(root, fn)))\n\n elif include_dirs:\n out.append(self.join(search_dir, match))\n\n elif (os.path.isfile(matchpath) and\n (fnmatch(match, pattern) or\n fnmatch(os.path.splitext(match)[0], pattern))):\n # exact filename match, or match up to an extension\n # note that we need fnmatch(match, pattern) in addition to the\n # splitext test to cover the case of the base filename itself\n # containing a dot.\n out.append(self.join(search_dir, match))\n\n return out\n\n def remove(self, filename):\n \"\"\"Delete a file or folder and prune the directory tree.\"\"\"\n path = self.to_path(filename)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n filepath = os.path.split(path)[0]\n try:\n os.removedirs(filepath)\n except OSError:\n # directory was not empty - good that we're not removing it!\n pass\n\n def remove_all(self, location):\n \"\"\"\n Delete all files/directories in the dataset at this location.\n\n Afterward prunes the directory tree.\n \"\"\"\n for fn in self.list(location):\n self.remove(fn)\n", "path": "qcodes/data/io.py"}]}
| 3,021 | 393 |
gh_patches_debug_14922
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1765
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
botocore instrumentation creating empty services
When using `boto3` to call out to services outside of the specially handled services (S3, SQS, DynamoDB, SNS), the `botocore` instrumentation creates spans with an incorrect `service.name`. This results in extra, empty services in both the service list and the service map.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/botocore.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import urllib.parse
32 from collections import namedtuple
33
34 from elasticapm.conf import constants
35 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
36 from elasticapm.traces import SpanType, capture_span, execution_context
37 from elasticapm.utils.disttracing import TraceParent
38 from elasticapm.utils.logging import get_logger
39
40 logger = get_logger("elasticapm.instrument")
41
42 SQS_MAX_ATTRIBUTES = 10
43
44
45 HandlerInfo = namedtuple("HandlerInfo", ("signature", "span_type", "span_subtype", "span_action", "context"))
46
47 # Used for boto3 < 1.7
48 endpoint_to_service_id = {"SNS": "SNS", "S3": "S3", "DYNAMODB": "DynamoDB", "SQS": "SQS"}
49
50
51 class BotocoreInstrumentation(AbstractInstrumentedModule):
52 name = "botocore"
53
54 instrument_list = [("botocore.client", "BaseClient._make_api_call")]
55
56 capture_span_ctx = capture_span
57
58 def _call(self, service, instance, args, kwargs):
59 """
60 This is split out from `call()` so that it can be re-used by the
61 aiobotocore instrumentation without duplicating all of this code.
62 """
63 operation_name = kwargs.get("operation_name", args[0])
64
65 parsed_url = urllib.parse.urlparse(instance.meta.endpoint_url)
66 context = {
67 "destination": {
68 "address": parsed_url.hostname,
69 "port": parsed_url.port,
70 "cloud": {"region": instance.meta.region_name},
71 }
72 }
73
74 handler_info = None
75 handler = handlers.get(service, False)
76 if handler:
77 handler_info = handler(operation_name, service, instance, args, kwargs, context)
78 if not handler_info:
79 handler_info = handle_default(operation_name, service, instance, args, kwargs, context)
80
81 return self.capture_span_ctx(
82 handler_info.signature,
83 span_type=handler_info.span_type,
84 leaf=True,
85 span_subtype=handler_info.span_subtype,
86 span_action=handler_info.span_action,
87 extra=handler_info.context,
88 )
89
90 def _get_service(self, instance):
91 service_model = instance.meta.service_model
92 if hasattr(service_model, "service_id"): # added in boto3 1.7
93 service = service_model.service_id
94 else:
95 service = service_model.service_name.upper()
96 service = endpoint_to_service_id.get(service, service)
97 return service
98
99 def call(self, module, method, wrapped, instance, args, kwargs):
100 service = self._get_service(instance)
101
102 ctx = self._call(service, instance, args, kwargs)
103 with ctx as span:
104 if service in pre_span_modifiers:
105 pre_span_modifiers[service](span, args, kwargs)
106 result = wrapped(*args, **kwargs)
107 if service in post_span_modifiers:
108 post_span_modifiers[service](span, args, kwargs, result)
109 request_id = result.get("ResponseMetadata", {}).get("RequestId")
110 if request_id:
111 span.update_context("http", {"request": {"id": request_id}})
112 return result
113
114
115 def handle_s3(operation_name, service, instance, args, kwargs, context):
116 span_type = "storage"
117 span_subtype = "s3"
118 span_action = operation_name
119 if len(args) > 1 and "Bucket" in args[1]:
120 bucket = args[1]["Bucket"]
121 else:
122 # TODO handle Access Points
123 bucket = ""
124 signature = f"S3 {operation_name} {bucket}"
125
126 context["destination"]["service"] = {"name": span_subtype, "resource": bucket, "type": span_type}
127
128 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
129
130
131 def handle_dynamodb(operation_name, service, instance, args, kwargs, context):
132 span_type = "db"
133 span_subtype = "dynamodb"
134 span_action = "query"
135 if len(args) > 1 and "TableName" in args[1]:
136 table = args[1]["TableName"]
137 else:
138 table = ""
139 signature = f"DynamoDB {operation_name} {table}".rstrip()
140
141 context["db"] = {"type": "dynamodb", "instance": instance.meta.region_name}
142 if operation_name == "Query" and len(args) > 1 and "KeyConditionExpression" in args[1]:
143 context["db"]["statement"] = args[1]["KeyConditionExpression"]
144
145 context["destination"]["service"] = {"name": span_subtype, "resource": table, "type": span_type}
146 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
147
148
149 def handle_sns(operation_name, service, instance, args, kwargs, context):
150 if operation_name != "Publish":
151 # only "publish" is handled specifically, other endpoints get the default treatment
152 return False
153 span_type = "messaging"
154 span_subtype = "sns"
155 span_action = "send"
156 topic_name = ""
157 if len(args) > 1:
158 if "Name" in args[1]:
159 topic_name = args[1]["Name"]
160 if "TopicArn" in args[1]:
161 topic_name = args[1]["TopicArn"].rsplit(":", maxsplit=1)[-1]
162 signature = f"SNS {operation_name} {topic_name}".rstrip()
163 context["destination"]["service"] = {
164 "name": span_subtype,
165 "resource": f"{span_subtype}/{topic_name}" if topic_name else span_subtype,
166 "type": span_type,
167 }
168 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
169
170
171 SQS_OPERATIONS = {
172 "SendMessage": {"span_action": "send", "signature": "SEND to"},
173 "SendMessageBatch": {"span_action": "send_batch", "signature": "SEND_BATCH to"},
174 "ReceiveMessage": {"span_action": "receive", "signature": "RECEIVE from"},
175 "DeleteMessage": {"span_action": "delete", "signature": "DELETE from"},
176 "DeleteMessageBatch": {"span_action": "delete_batch", "signature": "DELETE_BATCH from"},
177 }
178
179
180 def handle_sqs(operation_name, service, instance, args, kwargs, context):
181 op = SQS_OPERATIONS.get(operation_name, None)
182 if not op:
183 # only "publish" is handled specifically, other endpoints get the default treatment
184 return False
185 span_type = "messaging"
186 span_subtype = "sqs"
187 topic_name = ""
188
189 if len(args) > 1:
190 topic_name = args[1]["QueueUrl"].rsplit("/", maxsplit=1)[-1]
191 signature = f"SQS {op['signature']} {topic_name}".rstrip() if topic_name else f"SQS {op['signature']}"
192 context["destination"]["service"] = {
193 "name": span_subtype,
194 "resource": f"{span_subtype}/{topic_name}" if topic_name else span_subtype,
195 "type": span_type,
196 }
197 return HandlerInfo(signature, span_type, span_subtype, op["span_action"], context)
198
199
200 def modify_span_sqs_pre(span, args, kwargs):
201 operation_name = kwargs.get("operation_name", args[0])
202 if span.id:
203 trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)
204 else:
205 # this is a dropped span, use transaction id instead
206 transaction = execution_context.get_transaction()
207 trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)
208 attributes = {constants.TRACEPARENT_HEADER_NAME: {"DataType": "String", "StringValue": trace_parent.to_string()}}
209 if trace_parent.tracestate:
210 attributes[constants.TRACESTATE_HEADER_NAME] = {"DataType": "String", "StringValue": trace_parent.tracestate}
211 if len(args) > 1:
212 if operation_name in ("SendMessage", "SendMessageBatch"):
213 attributes_count = len(attributes)
214 if operation_name == "SendMessage":
215 messages = [args[1]]
216 else:
217 messages = args[1]["Entries"]
218 for message in messages:
219 message["MessageAttributes"] = message.get("MessageAttributes") or {}
220 if len(message["MessageAttributes"]) + attributes_count <= SQS_MAX_ATTRIBUTES:
221 message["MessageAttributes"].update(attributes)
222 else:
223 logger.info("Not adding disttracing headers to message due to attribute limit reached")
224 elif operation_name == "ReceiveMessage":
225 message_attributes = args[1].setdefault("MessageAttributeNames", [])
226 if "All" not in message_attributes:
227 message_attributes.extend([constants.TRACEPARENT_HEADER_NAME, constants.TRACESTATE_HEADER_NAME])
228
229
230 def modify_span_sqs_post(span: SpanType, args, kwargs, result):
231 operation_name = kwargs.get("operation_name", args[0])
232 if operation_name == "ReceiveMessage" and "Messages" in result:
233 for message in result["Messages"][:1000]: # only up to 1000 span links are recorded
234 if "MessageAttributes" in message and constants.TRACEPARENT_HEADER_NAME in message["MessageAttributes"]:
235 tp = TraceParent.from_string(
236 message["MessageAttributes"][constants.TRACEPARENT_HEADER_NAME]["StringValue"]
237 )
238 span.add_link(tp)
239
240
241 def handle_default(operation_name, service, instance, args, kwargs, destination):
242 span_type = "aws"
243 span_subtype = service.lower()
244 span_action = operation_name
245
246 destination["service"] = {"name": span_subtype, "resource": span_subtype, "type": span_type}
247
248 signature = f"{service}:{operation_name}"
249 return HandlerInfo(signature, span_type, span_subtype, span_action, destination)
250
251
252 handlers = {
253 "S3": handle_s3,
254 "DynamoDB": handle_dynamodb,
255 "SNS": handle_sns,
256 "SQS": handle_sqs,
257 "default": handle_default,
258 }
259
260 pre_span_modifiers = {
261 "SQS": modify_span_sqs_pre,
262 }
263
264 post_span_modifiers = {
265 "SQS": modify_span_sqs_post,
266 }
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/instrumentation/packages/botocore.py b/elasticapm/instrumentation/packages/botocore.py
--- a/elasticapm/instrumentation/packages/botocore.py
+++ b/elasticapm/instrumentation/packages/botocore.py
@@ -238,15 +238,15 @@
span.add_link(tp)
-def handle_default(operation_name, service, instance, args, kwargs, destination):
+def handle_default(operation_name, service, instance, args, kwargs, context):
span_type = "aws"
span_subtype = service.lower()
span_action = operation_name
- destination["service"] = {"name": span_subtype, "resource": span_subtype, "type": span_type}
+ context["destination"]["service"] = {"name": span_subtype, "resource": span_subtype, "type": span_type}
signature = f"{service}:{operation_name}"
- return HandlerInfo(signature, span_type, span_subtype, span_action, destination)
+ return HandlerInfo(signature, span_type, span_subtype, span_action, context)
handlers = {
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/botocore.py b/elasticapm/instrumentation/packages/botocore.py\n--- a/elasticapm/instrumentation/packages/botocore.py\n+++ b/elasticapm/instrumentation/packages/botocore.py\n@@ -238,15 +238,15 @@\n span.add_link(tp)\n \n \n-def handle_default(operation_name, service, instance, args, kwargs, destination):\n+def handle_default(operation_name, service, instance, args, kwargs, context):\n span_type = \"aws\"\n span_subtype = service.lower()\n span_action = operation_name\n \n- destination[\"service\"] = {\"name\": span_subtype, \"resource\": span_subtype, \"type\": span_type}\n+ context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": span_subtype, \"type\": span_type}\n \n signature = f\"{service}:{operation_name}\"\n- return HandlerInfo(signature, span_type, span_subtype, span_action, destination)\n+ return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n \n \n handlers = {\n", "issue": "botocore instrumentation creating empty services\nWhen using `boto3` to call out to services outside of the specially handled services (S3, SQS, DynamoDB, SNS), the `botocore` instrumentation creates spans with an incorrect `service.name`. This results in extra, empty services in both the service list and the service map.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport urllib.parse\nfrom collections import namedtuple\n\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import SpanType, capture_span, execution_context\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nSQS_MAX_ATTRIBUTES = 10\n\n\nHandlerInfo = namedtuple(\"HandlerInfo\", (\"signature\", \"span_type\", \"span_subtype\", \"span_action\", \"context\"))\n\n# Used for boto3 < 1.7\nendpoint_to_service_id = {\"SNS\": \"SNS\", \"S3\": \"S3\", \"DYNAMODB\": \"DynamoDB\", \"SQS\": \"SQS\"}\n\n\nclass BotocoreInstrumentation(AbstractInstrumentedModule):\n name = \"botocore\"\n\n instrument_list = [(\"botocore.client\", \"BaseClient._make_api_call\")]\n\n capture_span_ctx = capture_span\n\n def _call(self, service, instance, args, kwargs):\n \"\"\"\n This is split out from `call()` so that it can be re-used by the\n aiobotocore instrumentation without duplicating all of this code.\n \"\"\"\n operation_name = kwargs.get(\"operation_name\", args[0])\n\n parsed_url = urllib.parse.urlparse(instance.meta.endpoint_url)\n context = {\n \"destination\": {\n \"address\": parsed_url.hostname,\n \"port\": parsed_url.port,\n \"cloud\": {\"region\": instance.meta.region_name},\n }\n }\n\n handler_info = None\n handler = handlers.get(service, False)\n if handler:\n handler_info = handler(operation_name, service, instance, args, kwargs, context)\n if not handler_info:\n handler_info = handle_default(operation_name, service, instance, args, kwargs, context)\n\n return self.capture_span_ctx(\n handler_info.signature,\n span_type=handler_info.span_type,\n leaf=True,\n span_subtype=handler_info.span_subtype,\n span_action=handler_info.span_action,\n extra=handler_info.context,\n )\n\n def _get_service(self, instance):\n service_model = instance.meta.service_model\n if hasattr(service_model, \"service_id\"): # added in boto3 1.7\n service = service_model.service_id\n else:\n service = service_model.service_name.upper()\n service = endpoint_to_service_id.get(service, service)\n return service\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n service = self._get_service(instance)\n\n ctx = self._call(service, instance, args, kwargs)\n with ctx as span:\n if service in pre_span_modifiers:\n pre_span_modifiers[service](span, args, kwargs)\n result = wrapped(*args, **kwargs)\n if service in post_span_modifiers:\n post_span_modifiers[service](span, args, kwargs, result)\n request_id = result.get(\"ResponseMetadata\", {}).get(\"RequestId\")\n if request_id:\n span.update_context(\"http\", {\"request\": {\"id\": request_id}})\n return result\n\n\ndef handle_s3(operation_name, service, instance, args, kwargs, context):\n span_type = \"storage\"\n span_subtype = \"s3\"\n span_action = operation_name\n if len(args) > 1 and \"Bucket\" in args[1]:\n bucket = args[1][\"Bucket\"]\n 
else:\n # TODO handle Access Points\n bucket = \"\"\n signature = f\"S3 {operation_name} {bucket}\"\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": bucket, \"type\": span_type}\n\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_dynamodb(operation_name, service, instance, args, kwargs, context):\n span_type = \"db\"\n span_subtype = \"dynamodb\"\n span_action = \"query\"\n if len(args) > 1 and \"TableName\" in args[1]:\n table = args[1][\"TableName\"]\n else:\n table = \"\"\n signature = f\"DynamoDB {operation_name} {table}\".rstrip()\n\n context[\"db\"] = {\"type\": \"dynamodb\", \"instance\": instance.meta.region_name}\n if operation_name == \"Query\" and len(args) > 1 and \"KeyConditionExpression\" in args[1]:\n context[\"db\"][\"statement\"] = args[1][\"KeyConditionExpression\"]\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": table, \"type\": span_type}\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_sns(operation_name, service, instance, args, kwargs, context):\n if operation_name != \"Publish\":\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sns\"\n span_action = \"send\"\n topic_name = \"\"\n if len(args) > 1:\n if \"Name\" in args[1]:\n topic_name = args[1][\"Name\"]\n if \"TopicArn\" in args[1]:\n topic_name = args[1][\"TopicArn\"].rsplit(\":\", maxsplit=1)[-1]\n signature = f\"SNS {operation_name} {topic_name}\".rstrip()\n context[\"destination\"][\"service\"] = {\n \"name\": span_subtype,\n \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n \"type\": span_type,\n }\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\nSQS_OPERATIONS = {\n \"SendMessage\": {\"span_action\": \"send\", \"signature\": \"SEND to\"},\n \"SendMessageBatch\": {\"span_action\": \"send_batch\", \"signature\": \"SEND_BATCH to\"},\n \"ReceiveMessage\": {\"span_action\": \"receive\", \"signature\": \"RECEIVE from\"},\n \"DeleteMessage\": {\"span_action\": \"delete\", \"signature\": \"DELETE from\"},\n \"DeleteMessageBatch\": {\"span_action\": \"delete_batch\", \"signature\": \"DELETE_BATCH from\"},\n}\n\n\ndef handle_sqs(operation_name, service, instance, args, kwargs, context):\n op = SQS_OPERATIONS.get(operation_name, None)\n if not op:\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sqs\"\n topic_name = \"\"\n\n if len(args) > 1:\n topic_name = args[1][\"QueueUrl\"].rsplit(\"/\", maxsplit=1)[-1]\n signature = f\"SQS {op['signature']} {topic_name}\".rstrip() if topic_name else f\"SQS {op['signature']}\"\n context[\"destination\"][\"service\"] = {\n \"name\": span_subtype,\n \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n \"type\": span_type,\n }\n return HandlerInfo(signature, span_type, span_subtype, op[\"span_action\"], context)\n\n\ndef modify_span_sqs_pre(span, args, kwargs):\n operation_name = kwargs.get(\"operation_name\", args[0])\n if span.id:\n trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)\n else:\n # this is a dropped span, use transaction id instead\n transaction = execution_context.get_transaction()\n trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)\n attributes = 
{constants.TRACEPARENT_HEADER_NAME: {\"DataType\": \"String\", \"StringValue\": trace_parent.to_string()}}\n if trace_parent.tracestate:\n attributes[constants.TRACESTATE_HEADER_NAME] = {\"DataType\": \"String\", \"StringValue\": trace_parent.tracestate}\n if len(args) > 1:\n if operation_name in (\"SendMessage\", \"SendMessageBatch\"):\n attributes_count = len(attributes)\n if operation_name == \"SendMessage\":\n messages = [args[1]]\n else:\n messages = args[1][\"Entries\"]\n for message in messages:\n message[\"MessageAttributes\"] = message.get(\"MessageAttributes\") or {}\n if len(message[\"MessageAttributes\"]) + attributes_count <= SQS_MAX_ATTRIBUTES:\n message[\"MessageAttributes\"].update(attributes)\n else:\n logger.info(\"Not adding disttracing headers to message due to attribute limit reached\")\n elif operation_name == \"ReceiveMessage\":\n message_attributes = args[1].setdefault(\"MessageAttributeNames\", [])\n if \"All\" not in message_attributes:\n message_attributes.extend([constants.TRACEPARENT_HEADER_NAME, constants.TRACESTATE_HEADER_NAME])\n\n\ndef modify_span_sqs_post(span: SpanType, args, kwargs, result):\n operation_name = kwargs.get(\"operation_name\", args[0])\n if operation_name == \"ReceiveMessage\" and \"Messages\" in result:\n for message in result[\"Messages\"][:1000]: # only up to 1000 span links are recorded\n if \"MessageAttributes\" in message and constants.TRACEPARENT_HEADER_NAME in message[\"MessageAttributes\"]:\n tp = TraceParent.from_string(\n message[\"MessageAttributes\"][constants.TRACEPARENT_HEADER_NAME][\"StringValue\"]\n )\n span.add_link(tp)\n\n\ndef handle_default(operation_name, service, instance, args, kwargs, destination):\n span_type = \"aws\"\n span_subtype = service.lower()\n span_action = operation_name\n\n destination[\"service\"] = {\"name\": span_subtype, \"resource\": span_subtype, \"type\": span_type}\n\n signature = f\"{service}:{operation_name}\"\n return HandlerInfo(signature, span_type, span_subtype, span_action, destination)\n\n\nhandlers = {\n \"S3\": handle_s3,\n \"DynamoDB\": handle_dynamodb,\n \"SNS\": handle_sns,\n \"SQS\": handle_sqs,\n \"default\": handle_default,\n}\n\npre_span_modifiers = {\n \"SQS\": modify_span_sqs_pre,\n}\n\npost_span_modifiers = {\n \"SQS\": modify_span_sqs_post,\n}\n", "path": "elasticapm/instrumentation/packages/botocore.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport urllib.parse\nfrom collections import namedtuple\n\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import SpanType, capture_span, execution_context\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nSQS_MAX_ATTRIBUTES = 10\n\n\nHandlerInfo = namedtuple(\"HandlerInfo\", (\"signature\", \"span_type\", \"span_subtype\", \"span_action\", \"context\"))\n\n# Used for boto3 < 1.7\nendpoint_to_service_id = {\"SNS\": \"SNS\", \"S3\": \"S3\", \"DYNAMODB\": \"DynamoDB\", \"SQS\": \"SQS\"}\n\n\nclass BotocoreInstrumentation(AbstractInstrumentedModule):\n name = \"botocore\"\n\n instrument_list = [(\"botocore.client\", \"BaseClient._make_api_call\")]\n\n capture_span_ctx = capture_span\n\n def _call(self, service, instance, args, kwargs):\n \"\"\"\n This is split out from `call()` so that it can be re-used by the\n aiobotocore instrumentation without duplicating all of this code.\n \"\"\"\n operation_name = kwargs.get(\"operation_name\", args[0])\n\n parsed_url = urllib.parse.urlparse(instance.meta.endpoint_url)\n context = {\n \"destination\": {\n \"address\": parsed_url.hostname,\n \"port\": parsed_url.port,\n \"cloud\": {\"region\": instance.meta.region_name},\n }\n }\n\n handler_info = None\n handler = handlers.get(service, False)\n if handler:\n handler_info = handler(operation_name, service, instance, args, kwargs, context)\n if not handler_info:\n handler_info = handle_default(operation_name, service, instance, args, kwargs, context)\n\n return self.capture_span_ctx(\n handler_info.signature,\n span_type=handler_info.span_type,\n leaf=True,\n span_subtype=handler_info.span_subtype,\n span_action=handler_info.span_action,\n extra=handler_info.context,\n )\n\n def _get_service(self, instance):\n service_model = instance.meta.service_model\n if hasattr(service_model, \"service_id\"): # added in boto3 1.7\n service = service_model.service_id\n else:\n service = service_model.service_name.upper()\n service = endpoint_to_service_id.get(service, service)\n return service\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n service = self._get_service(instance)\n\n ctx = self._call(service, instance, args, kwargs)\n with ctx as span:\n if service in pre_span_modifiers:\n pre_span_modifiers[service](span, args, kwargs)\n result = wrapped(*args, **kwargs)\n if service in post_span_modifiers:\n post_span_modifiers[service](span, args, kwargs, result)\n request_id = result.get(\"ResponseMetadata\", {}).get(\"RequestId\")\n if request_id:\n span.update_context(\"http\", {\"request\": {\"id\": request_id}})\n return result\n\n\ndef handle_s3(operation_name, service, instance, args, kwargs, context):\n span_type = \"storage\"\n span_subtype = \"s3\"\n span_action = operation_name\n if len(args) > 1 and \"Bucket\" in args[1]:\n bucket = args[1][\"Bucket\"]\n 
else:\n # TODO handle Access Points\n bucket = \"\"\n signature = f\"S3 {operation_name} {bucket}\"\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": bucket, \"type\": span_type}\n\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_dynamodb(operation_name, service, instance, args, kwargs, context):\n span_type = \"db\"\n span_subtype = \"dynamodb\"\n span_action = \"query\"\n if len(args) > 1 and \"TableName\" in args[1]:\n table = args[1][\"TableName\"]\n else:\n table = \"\"\n signature = f\"DynamoDB {operation_name} {table}\".rstrip()\n\n context[\"db\"] = {\"type\": \"dynamodb\", \"instance\": instance.meta.region_name}\n if operation_name == \"Query\" and len(args) > 1 and \"KeyConditionExpression\" in args[1]:\n context[\"db\"][\"statement\"] = args[1][\"KeyConditionExpression\"]\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": table, \"type\": span_type}\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_sns(operation_name, service, instance, args, kwargs, context):\n if operation_name != \"Publish\":\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sns\"\n span_action = \"send\"\n topic_name = \"\"\n if len(args) > 1:\n if \"Name\" in args[1]:\n topic_name = args[1][\"Name\"]\n if \"TopicArn\" in args[1]:\n topic_name = args[1][\"TopicArn\"].rsplit(\":\", maxsplit=1)[-1]\n signature = f\"SNS {operation_name} {topic_name}\".rstrip()\n context[\"destination\"][\"service\"] = {\n \"name\": span_subtype,\n \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n \"type\": span_type,\n }\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\nSQS_OPERATIONS = {\n \"SendMessage\": {\"span_action\": \"send\", \"signature\": \"SEND to\"},\n \"SendMessageBatch\": {\"span_action\": \"send_batch\", \"signature\": \"SEND_BATCH to\"},\n \"ReceiveMessage\": {\"span_action\": \"receive\", \"signature\": \"RECEIVE from\"},\n \"DeleteMessage\": {\"span_action\": \"delete\", \"signature\": \"DELETE from\"},\n \"DeleteMessageBatch\": {\"span_action\": \"delete_batch\", \"signature\": \"DELETE_BATCH from\"},\n}\n\n\ndef handle_sqs(operation_name, service, instance, args, kwargs, context):\n op = SQS_OPERATIONS.get(operation_name, None)\n if not op:\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sqs\"\n topic_name = \"\"\n\n if len(args) > 1:\n topic_name = args[1][\"QueueUrl\"].rsplit(\"/\", maxsplit=1)[-1]\n signature = f\"SQS {op['signature']} {topic_name}\".rstrip() if topic_name else f\"SQS {op['signature']}\"\n context[\"destination\"][\"service\"] = {\n \"name\": span_subtype,\n \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n \"type\": span_type,\n }\n return HandlerInfo(signature, span_type, span_subtype, op[\"span_action\"], context)\n\n\ndef modify_span_sqs_pre(span, args, kwargs):\n operation_name = kwargs.get(\"operation_name\", args[0])\n if span.id:\n trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)\n else:\n # this is a dropped span, use transaction id instead\n transaction = execution_context.get_transaction()\n trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)\n attributes = 
{constants.TRACEPARENT_HEADER_NAME: {\"DataType\": \"String\", \"StringValue\": trace_parent.to_string()}}\n if trace_parent.tracestate:\n attributes[constants.TRACESTATE_HEADER_NAME] = {\"DataType\": \"String\", \"StringValue\": trace_parent.tracestate}\n if len(args) > 1:\n if operation_name in (\"SendMessage\", \"SendMessageBatch\"):\n attributes_count = len(attributes)\n if operation_name == \"SendMessage\":\n messages = [args[1]]\n else:\n messages = args[1][\"Entries\"]\n for message in messages:\n message[\"MessageAttributes\"] = message.get(\"MessageAttributes\") or {}\n if len(message[\"MessageAttributes\"]) + attributes_count <= SQS_MAX_ATTRIBUTES:\n message[\"MessageAttributes\"].update(attributes)\n else:\n logger.info(\"Not adding disttracing headers to message due to attribute limit reached\")\n elif operation_name == \"ReceiveMessage\":\n message_attributes = args[1].setdefault(\"MessageAttributeNames\", [])\n if \"All\" not in message_attributes:\n message_attributes.extend([constants.TRACEPARENT_HEADER_NAME, constants.TRACESTATE_HEADER_NAME])\n\n\ndef modify_span_sqs_post(span: SpanType, args, kwargs, result):\n operation_name = kwargs.get(\"operation_name\", args[0])\n if operation_name == \"ReceiveMessage\" and \"Messages\" in result:\n for message in result[\"Messages\"][:1000]: # only up to 1000 span links are recorded\n if \"MessageAttributes\" in message and constants.TRACEPARENT_HEADER_NAME in message[\"MessageAttributes\"]:\n tp = TraceParent.from_string(\n message[\"MessageAttributes\"][constants.TRACEPARENT_HEADER_NAME][\"StringValue\"]\n )\n span.add_link(tp)\n\n\ndef handle_default(operation_name, service, instance, args, kwargs, context):\n span_type = \"aws\"\n span_subtype = service.lower()\n span_action = operation_name\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": span_subtype, \"type\": span_type}\n\n signature = f\"{service}:{operation_name}\"\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\nhandlers = {\n \"S3\": handle_s3,\n \"DynamoDB\": handle_dynamodb,\n \"SNS\": handle_sns,\n \"SQS\": handle_sqs,\n \"default\": handle_default,\n}\n\npre_span_modifiers = {\n \"SQS\": modify_span_sqs_pre,\n}\n\npost_span_modifiers = {\n \"SQS\": modify_span_sqs_post,\n}\n", "path": "elasticapm/instrumentation/packages/botocore.py"}]}
| 3,552 | 244 |
gh_patches_debug_20929
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-373
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make !otn accept uppercase and punctuation
There are weird unicode alternatives for stuff like capital letters and punctuation that will get through Discord's "only lowercase and dashes" filters.
When someone uses `!otn a` and tries to add a name with an apostrophe, we currently transform those into something that the system will accept. Let's do the same for exclamation points, question marks, and uppercase letters (a sketch of this mapping follows the issue text)!
* For exclamation points, use ǃ
* For questionmarks, use ?
* For uppercase, use 𝖠𝖡𝖢𝖣𝖤𝖥𝖦𝖧𝖨𝖩𝖪𝖫𝖬𝖭𝖮𝖯𝖰𝖱𝖲𝖳𝖴𝖵𝖶𝖷𝖸𝖹
--- END ISSUE ---
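A minimal sketch of the intended mapping, using Python's built-in `str.maketrans`/`str.translate`. The helper name and the example call are illustrative only; the characters come from the issue above, and this is not the bot's actual `OffTopicName` converter:

```python
# Sketch: map ASCII uppercase and punctuation to Discord-safe unicode lookalikes.
UPPERCASE_LOOKALIKES = '𝖠𝖡𝖢𝖣𝖤𝖥𝖦𝖧𝖨𝖩𝖪𝖫𝖬𝖭𝖮𝖯𝖰𝖱𝖲𝖳𝖴𝖵𝖶𝖷𝖸𝖹'

# str.maketrans needs both strings to contain the same number of code points:
# 26 letters plus the four punctuation characters on each side.
TRANSLATION_TABLE = str.maketrans(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ!?'`",
    UPPERCASE_LOOKALIKES + 'ǃ?’’',
)

def to_channel_safe(name: str) -> str:
    """Replace characters Discord rejects with accepted unicode alternatives."""
    return name.translate(TRANSLATION_TABLE)

print(to_channel_safe("What's-Up!"))  # 𝖶hat’s-𝖴pǃ
```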
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/off_topic_names.py`
Content:
```
1 import asyncio
2 import logging
3 from datetime import datetime, timedelta
4
5 from discord import Colour, Embed
6 from discord.ext.commands import BadArgument, Bot, Context, Converter, group
7
8 from bot.constants import Channels, Keys, MODERATION_ROLES, URLs
9 from bot.decorators import with_role
10 from bot.pagination import LinePaginator
11
12
13 CHANNELS = (Channels.off_topic_0, Channels.off_topic_1, Channels.off_topic_2)
14 log = logging.getLogger(__name__)
15
16
17 class OffTopicName(Converter):
18 """A converter that ensures an added off-topic name is valid."""
19
20 @staticmethod
21 async def convert(ctx: Context, argument: str):
22 allowed_characters = ("-", "’", "'", "`")
23
24 if not (2 <= len(argument) <= 96):
25 raise BadArgument("Channel name must be between 2 and 96 chars long")
26
27 elif not all(c.isalnum() or c in allowed_characters for c in argument):
28 raise BadArgument(
29 "Channel name must only consist of "
30 "alphanumeric characters, minus signs or apostrophes."
31 )
32
33 elif not argument.islower():
34 raise BadArgument("Channel name must be lowercase")
35
36 # Replace some unusable apostrophe-like characters with "’".
37 return argument.replace("'", "’").replace("`", "’")
38
39
40 async def update_names(bot: Bot, headers: dict):
41 """
42 The background updater task that performs a channel name update daily.
43
44 Args:
45 bot (Bot):
46 The running bot instance, used for fetching data from the
47 website via the bot's `http_session`.
48 """
49
50 while True:
51 # Since we truncate the compute timedelta to seconds, we add one second to ensure
52 # we go past midnight in the `seconds_to_sleep` set below.
53 today_at_midnight = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=0)
54 next_midnight = today_at_midnight + timedelta(days=1)
55 seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1
56 await asyncio.sleep(seconds_to_sleep)
57
58 response = await bot.http_session.get(
59 f'{URLs.site_off_topic_names_api}?random_items=3',
60 headers=headers
61 )
62 channel_0_name, channel_1_name, channel_2_name = await response.json()
63 channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS)
64
65 await channel_0.edit(name=f'ot0-{channel_0_name}')
66 await channel_1.edit(name=f'ot1-{channel_1_name}')
67 await channel_2.edit(name=f'ot2-{channel_2_name}')
68 log.debug(
69 "Updated off-topic channel names to"
70 f" {channel_0_name}, {channel_1_name} and {channel_2_name}"
71 )
72
73
74 class OffTopicNames:
75 """Commands related to managing the off-topic category channel names."""
76
77 def __init__(self, bot: Bot):
78 self.bot = bot
79 self.headers = {"X-API-KEY": Keys.site_api}
80 self.updater_task = None
81
82 def __cleanup(self):
83 if self.updater_task is not None:
84 self.updater_task.cancel()
85
86 async def on_ready(self):
87 if self.updater_task is None:
88 coro = update_names(self.bot, self.headers)
89 self.updater_task = self.bot.loop.create_task(coro)
90
91 @group(name='otname', aliases=('otnames', 'otn'), invoke_without_command=True)
92 @with_role(*MODERATION_ROLES)
93 async def otname_group(self, ctx):
94 """Add or list items from the off-topic channel name rotation."""
95
96 await ctx.invoke(self.bot.get_command("help"), "otname")
97
98 @otname_group.command(name='add', aliases=('a',))
99 @with_role(*MODERATION_ROLES)
100 async def add_command(self, ctx, name: OffTopicName):
101 """Adds a new off-topic name to the rotation."""
102
103 result = await self.bot.http_session.post(
104 URLs.site_off_topic_names_api,
105 headers=self.headers,
106 params={'name': name}
107 )
108
109 response = await result.json()
110
111 if result.status == 200:
112 log.info(
113 f"{ctx.author.name}#{ctx.author.discriminator}"
114 f" added the off-topic channel name '{name}"
115 )
116 await ctx.send(":ok_hand:")
117 else:
118 error_reason = response.get('message', "No reason provided.")
119 await ctx.send(f":warning: got non-200 from the API: {error_reason}")
120
121 @otname_group.command(name='delete', aliases=('remove', 'rm', 'del', 'd'))
122 @with_role(*MODERATION_ROLES)
123 async def delete_command(self, ctx, name: OffTopicName):
124 """Removes a off-topic name from the rotation."""
125
126 result = await self.bot.http_session.delete(
127 URLs.site_off_topic_names_api,
128 headers=self.headers,
129 params={'name': name}
130 )
131
132 response = await result.json()
133
134 if result.status == 200:
135 if response['deleted'] == 0:
136 await ctx.send(f":warning: No name matching `{name}` was found in the database.")
137 else:
138 log.info(
139 f"{ctx.author.name}#{ctx.author.discriminator}"
140 f" deleted the off-topic channel name '{name}"
141 )
142 await ctx.send(":ok_hand:")
143 else:
144 error_reason = response.get('message', "No reason provided.")
145 await ctx.send(f":warning: got non-200 from the API: {error_reason}")
146
147 @otname_group.command(name='list', aliases=('l',))
148 @with_role(*MODERATION_ROLES)
149 async def list_command(self, ctx):
150 """
151 Lists all currently known off-topic channel names in a paginator.
152 Restricted to Moderator and above to not spoil the surprise.
153 """
154
155 result = await self.bot.http_session.get(
156 URLs.site_off_topic_names_api,
157 headers=self.headers
158 )
159 response = await result.json()
160 lines = sorted(f"• {name}" for name in response)
161
162 embed = Embed(
163 title=f"Known off-topic names (`{len(response)}` total)",
164 colour=Colour.blue()
165 )
166 await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False)
167
168
169 def setup(bot: Bot):
170 bot.add_cog(OffTopicNames(bot))
171 log.info("Cog loaded: OffTopicNames")
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bot/cogs/off_topic_names.py b/bot/cogs/off_topic_names.py
--- a/bot/cogs/off_topic_names.py
+++ b/bot/cogs/off_topic_names.py
@@ -19,7 +19,7 @@
@staticmethod
async def convert(ctx: Context, argument: str):
- allowed_characters = ("-", "’", "'", "`")
+ allowed_characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ!?'`"
if not (2 <= len(argument) <= 96):
raise BadArgument("Channel name must be between 2 and 96 chars long")
@@ -30,11 +30,11 @@
"alphanumeric characters, minus signs or apostrophes."
)
- elif not argument.islower():
- raise BadArgument("Channel name must be lowercase")
-
- # Replace some unusable apostrophe-like characters with "’".
- return argument.replace("'", "’").replace("`", "’")
+ # Replace invalid characters with unicode alternatives.
+ table = str.maketrans(
+ allowed_characters, '𝖠𝖡𝖢𝖣𝖤𝖥𝖦𝖧𝖨𝖩𝖪𝖫𝖬𝖭𝖮𝖯𝖰𝖱𝖲𝖳𝖴𝖵𝖶𝖷𝖸𝖹ǃ?’’'
+ )
+ return argument.translate(table)
async def update_names(bot: Bot, headers: dict):
|
{"golden_diff": "diff --git a/bot/cogs/off_topic_names.py b/bot/cogs/off_topic_names.py\n--- a/bot/cogs/off_topic_names.py\n+++ b/bot/cogs/off_topic_names.py\n@@ -19,7 +19,7 @@\n \n @staticmethod\n async def convert(ctx: Context, argument: str):\n- allowed_characters = (\"-\", \"\u2019\", \"'\", \"`\")\n+ allowed_characters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ!?'`\"\n \n if not (2 <= len(argument) <= 96):\n raise BadArgument(\"Channel name must be between 2 and 96 chars long\")\n@@ -30,11 +30,11 @@\n \"alphanumeric characters, minus signs or apostrophes.\"\n )\n \n- elif not argument.islower():\n- raise BadArgument(\"Channel name must be lowercase\")\n-\n- # Replace some unusable apostrophe-like characters with \"\u2019\".\n- return argument.replace(\"'\", \"\u2019\").replace(\"`\", \"\u2019\")\n+ # Replace invalid characters with unicode alternatives.\n+ table = str.maketrans(\n+ allowed_characters, '\ud835\udda0\ud835\udda1\ud835\udda2\ud835\udda3\ud835\udda4\ud835\udda5\ud835\udda6\ud835\udda7\ud835\udda8\ud835\udda9\ud835\uddaa\ud835\uddab\ud835\uddac\ud835\uddad\ud835\uddae\ud835\uddaf\ud835\uddb0\ud835\uddb1\ud835\uddb2\ud835\uddb3\ud835\uddb4\ud835\uddb5\ud835\uddb6\ud835\uddb7\ud835\uddb8\ud835\uddb9\u01c3\uff1f\u2019\u2019'\n+ )\n+ return argument.translate(table)\n \n \n async def update_names(bot: Bot, headers: dict):\n", "issue": "make !otn accept uppercase and punctuation\nThere are weird unicode alternatives for stuff like capital letters and punctuation that will get through the discord \"only lowercase and dashes\" filters.\r\n\r\nWhen someone uses `!otn a` and tries to add a name with an apostrophe, we currently are transforming those into something that the system will accept. Let's do the same for exclamation points, question marks, and uppercase letters!\r\n\r\n* For exclamation points, use \u01c3\r\n* For questionmarks, use \uff1f\r\n* For uppercase, use \ud835\udda0\ud835\udda1\ud835\udda2\ud835\udda3\ud835\udda4\ud835\udda5\ud835\udda6\ud835\udda7\ud835\udda8\ud835\udda9\ud835\uddaa\ud835\uddab\ud835\uddac\ud835\uddad\ud835\uddae\ud835\uddaf\ud835\uddb0\ud835\uddb1\ud835\uddb2\ud835\uddb3\ud835\uddb4\ud835\uddb5\ud835\uddb6\ud835\uddb7\ud835\uddb8\ud835\uddb9\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import BadArgument, Bot, Context, Converter, group\n\nfrom bot.constants import Channels, Keys, MODERATION_ROLES, URLs\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nCHANNELS = (Channels.off_topic_0, Channels.off_topic_1, Channels.off_topic_2)\nlog = logging.getLogger(__name__)\n\n\nclass OffTopicName(Converter):\n \"\"\"A converter that ensures an added off-topic name is valid.\"\"\"\n\n @staticmethod\n async def convert(ctx: Context, argument: str):\n allowed_characters = (\"-\", \"\u2019\", \"'\", \"`\")\n\n if not (2 <= len(argument) <= 96):\n raise BadArgument(\"Channel name must be between 2 and 96 chars long\")\n\n elif not all(c.isalnum() or c in allowed_characters for c in argument):\n raise BadArgument(\n \"Channel name must only consist of \"\n \"alphanumeric characters, minus signs or apostrophes.\"\n )\n\n elif not argument.islower():\n raise BadArgument(\"Channel name must be lowercase\")\n\n # Replace some unusable apostrophe-like characters with \"\u2019\".\n return argument.replace(\"'\", \"\u2019\").replace(\"`\", \"\u2019\")\n\n\nasync def update_names(bot: Bot, headers: dict):\n \"\"\"\n The 
background updater task that performs a channel name update daily.\n\n Args:\n bot (Bot):\n The running bot instance, used for fetching data from the\n website via the bot's `http_session`.\n \"\"\"\n\n while True:\n # Since we truncate the compute timedelta to seconds, we add one second to ensure\n # we go past midnight in the `seconds_to_sleep` set below.\n today_at_midnight = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=0)\n next_midnight = today_at_midnight + timedelta(days=1)\n seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1\n await asyncio.sleep(seconds_to_sleep)\n\n response = await bot.http_session.get(\n f'{URLs.site_off_topic_names_api}?random_items=3',\n headers=headers\n )\n channel_0_name, channel_1_name, channel_2_name = await response.json()\n channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS)\n\n await channel_0.edit(name=f'ot0-{channel_0_name}')\n await channel_1.edit(name=f'ot1-{channel_1_name}')\n await channel_2.edit(name=f'ot2-{channel_2_name}')\n log.debug(\n \"Updated off-topic channel names to\"\n f\" {channel_0_name}, {channel_1_name} and {channel_2_name}\"\n )\n\n\nclass OffTopicNames:\n \"\"\"Commands related to managing the off-topic category channel names.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.headers = {\"X-API-KEY\": Keys.site_api}\n self.updater_task = None\n\n def __cleanup(self):\n if self.updater_task is not None:\n self.updater_task.cancel()\n\n async def on_ready(self):\n if self.updater_task is None:\n coro = update_names(self.bot, self.headers)\n self.updater_task = self.bot.loop.create_task(coro)\n\n @group(name='otname', aliases=('otnames', 'otn'), invoke_without_command=True)\n @with_role(*MODERATION_ROLES)\n async def otname_group(self, ctx):\n \"\"\"Add or list items from the off-topic channel name rotation.\"\"\"\n\n await ctx.invoke(self.bot.get_command(\"help\"), \"otname\")\n\n @otname_group.command(name='add', aliases=('a',))\n @with_role(*MODERATION_ROLES)\n async def add_command(self, ctx, name: OffTopicName):\n \"\"\"Adds a new off-topic name to the rotation.\"\"\"\n\n result = await self.bot.http_session.post(\n URLs.site_off_topic_names_api,\n headers=self.headers,\n params={'name': name}\n )\n\n response = await result.json()\n\n if result.status == 200:\n log.info(\n f\"{ctx.author.name}#{ctx.author.discriminator}\"\n f\" added the off-topic channel name '{name}\"\n )\n await ctx.send(\":ok_hand:\")\n else:\n error_reason = response.get('message', \"No reason provided.\")\n await ctx.send(f\":warning: got non-200 from the API: {error_reason}\")\n\n @otname_group.command(name='delete', aliases=('remove', 'rm', 'del', 'd'))\n @with_role(*MODERATION_ROLES)\n async def delete_command(self, ctx, name: OffTopicName):\n \"\"\"Removes a off-topic name from the rotation.\"\"\"\n\n result = await self.bot.http_session.delete(\n URLs.site_off_topic_names_api,\n headers=self.headers,\n params={'name': name}\n )\n\n response = await result.json()\n\n if result.status == 200:\n if response['deleted'] == 0:\n await ctx.send(f\":warning: No name matching `{name}` was found in the database.\")\n else:\n log.info(\n f\"{ctx.author.name}#{ctx.author.discriminator}\"\n f\" deleted the off-topic channel name '{name}\"\n )\n await ctx.send(\":ok_hand:\")\n else:\n error_reason = response.get('message', \"No reason provided.\")\n await ctx.send(f\":warning: got non-200 from the API: {error_reason}\")\n\n @otname_group.command(name='list', 
aliases=('l',))\n @with_role(*MODERATION_ROLES)\n async def list_command(self, ctx):\n \"\"\"\n Lists all currently known off-topic channel names in a paginator.\n Restricted to Moderator and above to not spoil the surprise.\n \"\"\"\n\n result = await self.bot.http_session.get(\n URLs.site_off_topic_names_api,\n headers=self.headers\n )\n response = await result.json()\n lines = sorted(f\"\u2022 {name}\" for name in response)\n\n embed = Embed(\n title=f\"Known off-topic names (`{len(response)}` total)\",\n colour=Colour.blue()\n )\n await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False)\n\n\ndef setup(bot: Bot):\n bot.add_cog(OffTopicNames(bot))\n log.info(\"Cog loaded: OffTopicNames\")\n", "path": "bot/cogs/off_topic_names.py"}], "after_files": [{"content": "import asyncio\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import BadArgument, Bot, Context, Converter, group\n\nfrom bot.constants import Channels, Keys, MODERATION_ROLES, URLs\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nCHANNELS = (Channels.off_topic_0, Channels.off_topic_1, Channels.off_topic_2)\nlog = logging.getLogger(__name__)\n\n\nclass OffTopicName(Converter):\n \"\"\"A converter that ensures an added off-topic name is valid.\"\"\"\n\n @staticmethod\n async def convert(ctx: Context, argument: str):\n allowed_characters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ!?'`\"\n\n if not (2 <= len(argument) <= 96):\n raise BadArgument(\"Channel name must be between 2 and 96 chars long\")\n\n elif not all(c.isalnum() or c in allowed_characters for c in argument):\n raise BadArgument(\n \"Channel name must only consist of \"\n \"alphanumeric characters, minus signs or apostrophes.\"\n )\n\n # Replace invalid characters with unicode alternatives.\n table = str.maketrans(\n allowed_characters, '\ud835\udda0\ud835\udda1\ud835\udda2\ud835\udda3\ud835\udda4\ud835\udda5\ud835\udda6\ud835\udda7\ud835\udda8\ud835\udda9\ud835\uddaa\ud835\uddab\ud835\uddac\ud835\uddad\ud835\uddae\ud835\uddaf\ud835\uddb0\ud835\uddb1\ud835\uddb2\ud835\uddb3\ud835\uddb4\ud835\uddb5\ud835\uddb6\ud835\uddb7\ud835\uddb8\ud835\uddb9\u01c3\uff1f\u2019\u2019'\n )\n return argument.translate(table)\n\n\nasync def update_names(bot: Bot, headers: dict):\n \"\"\"\n The background updater task that performs a channel name update daily.\n\n Args:\n bot (Bot):\n The running bot instance, used for fetching data from the\n website via the bot's `http_session`.\n \"\"\"\n\n while True:\n # Since we truncate the compute timedelta to seconds, we add one second to ensure\n # we go past midnight in the `seconds_to_sleep` set below.\n today_at_midnight = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=0)\n next_midnight = today_at_midnight + timedelta(days=1)\n seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1\n await asyncio.sleep(seconds_to_sleep)\n\n response = await bot.http_session.get(\n f'{URLs.site_off_topic_names_api}?random_items=3',\n headers=headers\n )\n channel_0_name, channel_1_name, channel_2_name = await response.json()\n channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS)\n\n await channel_0.edit(name=f'ot0-{channel_0_name}')\n await channel_1.edit(name=f'ot1-{channel_1_name}')\n await channel_2.edit(name=f'ot2-{channel_2_name}')\n log.debug(\n \"Updated off-topic channel names to\"\n f\" {channel_0_name}, {channel_1_name} and {channel_2_name}\"\n )\n\n\nclass 
OffTopicNames:\n \"\"\"Commands related to managing the off-topic category channel names.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.headers = {\"X-API-KEY\": Keys.site_api}\n self.updater_task = None\n\n def __cleanup(self):\n if self.updater_task is not None:\n self.updater_task.cancel()\n\n async def on_ready(self):\n if self.updater_task is None:\n coro = update_names(self.bot, self.headers)\n self.updater_task = self.bot.loop.create_task(coro)\n\n @group(name='otname', aliases=('otnames', 'otn'), invoke_without_command=True)\n @with_role(*MODERATION_ROLES)\n async def otname_group(self, ctx):\n \"\"\"Add or list items from the off-topic channel name rotation.\"\"\"\n\n await ctx.invoke(self.bot.get_command(\"help\"), \"otname\")\n\n @otname_group.command(name='add', aliases=('a',))\n @with_role(*MODERATION_ROLES)\n async def add_command(self, ctx, name: OffTopicName):\n \"\"\"Adds a new off-topic name to the rotation.\"\"\"\n\n result = await self.bot.http_session.post(\n URLs.site_off_topic_names_api,\n headers=self.headers,\n params={'name': name}\n )\n\n response = await result.json()\n\n if result.status == 200:\n log.info(\n f\"{ctx.author.name}#{ctx.author.discriminator}\"\n f\" added the off-topic channel name '{name}\"\n )\n await ctx.send(\":ok_hand:\")\n else:\n error_reason = response.get('message', \"No reason provided.\")\n await ctx.send(f\":warning: got non-200 from the API: {error_reason}\")\n\n @otname_group.command(name='delete', aliases=('remove', 'rm', 'del', 'd'))\n @with_role(*MODERATION_ROLES)\n async def delete_command(self, ctx, name: OffTopicName):\n \"\"\"Removes a off-topic name from the rotation.\"\"\"\n\n result = await self.bot.http_session.delete(\n URLs.site_off_topic_names_api,\n headers=self.headers,\n params={'name': name}\n )\n\n response = await result.json()\n\n if result.status == 200:\n if response['deleted'] == 0:\n await ctx.send(f\":warning: No name matching `{name}` was found in the database.\")\n else:\n log.info(\n f\"{ctx.author.name}#{ctx.author.discriminator}\"\n f\" deleted the off-topic channel name '{name}\"\n )\n await ctx.send(\":ok_hand:\")\n else:\n error_reason = response.get('message', \"No reason provided.\")\n await ctx.send(f\":warning: got non-200 from the API: {error_reason}\")\n\n @otname_group.command(name='list', aliases=('l',))\n @with_role(*MODERATION_ROLES)\n async def list_command(self, ctx):\n \"\"\"\n Lists all currently known off-topic channel names in a paginator.\n Restricted to Moderator and above to not spoil the surprise.\n \"\"\"\n\n result = await self.bot.http_session.get(\n URLs.site_off_topic_names_api,\n headers=self.headers\n )\n response = await result.json()\n lines = sorted(f\"\u2022 {name}\" for name in response)\n\n embed = Embed(\n title=f\"Known off-topic names (`{len(response)}` total)\",\n colour=Colour.blue()\n )\n await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False)\n\n\ndef setup(bot: Bot):\n bot.add_cog(OffTopicNames(bot))\n log.info(\"Cog loaded: OffTopicNames\")\n", "path": "bot/cogs/off_topic_names.py"}]}
| 2,260 | 318 |
gh_patches_debug_56601
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2351
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double Check satellite_version.CapsuleVersion Combiner
The CapsuleVersion combiner currently relies on the satellite-capsule package.
The satellite-capsule package can be installed on a satellite server too, and plugin rules may use the combiner to wrongly identify a satellite server as a capsule.
The "foreman" package would not be present on a Capsule server, and hence its absence can be added as a check (sketched below).
--- END ISSUE ---
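A minimal sketch of that check; `get_max()` is taken from the combiner's existing code shown below, and the `'foreman' in rpms` membership test is the kind of lookup the `InstalledRpms` parser supports. The helper name is illustrative and this is not the shipped fix:

```python
# Sketch: only treat the host as a Capsule when satellite-capsule is installed
# and foreman is absent (foreman is expected only on the Satellite Server).
def looks_like_capsule(rpms):
    capsule_pkg = rpms.get_max('satellite-capsule')
    return bool(capsule_pkg) and 'foreman' not in rpms
```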
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/combiners/satellite_version.py`
Content:
```
1 """
2 Satellite Version
3 =================
4
5 The following modules are included:
6
7 SatelliteVersion - Version of Satellite Server
8 ----------------------------------------------
9 Combiner to get Satellite Server version information.
10
11 CapsuleVersion - Version of Satellite Capsule (>=6.2)
12 -----------------------------------------------------
13 Combiner to get Satellite Capsule version information. ONLY Satellite Capsule
14 6.2 and newer are supported.
15
16
17 """
18
19 from insights import combiner, SkipComponent
20 from insights.parsers.satellite_version import Satellite6Version as Sat6Ver
21 from insights.parsers.installed_rpms import InstalledRpms
22
23
24 # NOTE:
25 # The following table only tracks 6.0.x and 6.1.x.
26 # See https://access.redhat.com/articles/1343683
27 # But, there are some mistakes in the KCS, the versions in below map
28 # are corrected according to the corresponding ERRATA pages.
29 #
30 # Update: Thu Nov 14 10:51:19 CST 2019
31 #
32 sat6_ver_map = {
33 # Sat foreman candlepin katello
34 '6.0.8': ('1.6.0.53', '0.9.23', '1.5.0'),
35 '6.1.1': ('1.7.2.33', '0.9.49.3', '2.2.0.14'),
36 '6.1.2': ('1.7.2.36', '0.9.49.6', '2.2.0.16'),
37 '6.1.3': ('1.7.2.43', '0.9.49.8', '2.2.0.16'),
38 '6.1.4': ('1.7.2.46', '0.9.49.9', '2.2.0.16'),
39 '6.1.5': ('1.7.2.49', '0.9.49.9', '2.2.0.16'),
40 '6.1.6': ('1.7.2.50', '0.9.49.9', '2.2.0.17'),
41 '6.1.7': ('1.7.2.53', '0.9.49.11', '2.2.0.17'),
42 '6.1.8': ('1.7.2.55', '0.9.49.12', '2.2.0.19'),
43 '6.1.9': ('1.7.2.56', '0.9.49.12', '2.2.0.19'),
44 '6.1.10': ('1.7.2.61', '0.9.49.16', '2.2.0.19'),
45 '6.1.11': ('1.7.2.62', '0.9.49.19', '2.2.0.19'),
46 '6.1.12': ('1.7.2.63', '0.9.49.23', '2.2.0.19'),
47 }
48
49
50 def _parse_sat_version(version):
51 ver_sp = version.split(".") if version else []
52 major = int(ver_sp[0]) if ver_sp and ver_sp[0].isdigit() else None
53 minor = int(ver_sp[1]) if len(ver_sp) > 1 and ver_sp[1].isdigit() else None
54 return [major, minor]
55
56
57 @combiner(InstalledRpms, optional=[Sat6Ver])
58 class SatelliteVersion(object):
59 """
60 Check the parsers
61 :class:`insights.parsers.satellite_version.Satellite6Version` and
62 :class:`insights.parsers.installed_rpms.InstalledRpms` for satellite version
63 information.
64
65 Below is the logic to determine the satellite version::
66
67 1. For Satellite 6.1:
68
69 a. Check the version information in below files at first
70 - https://access.redhat.com/solutions/1392633
71 File: /usr/share/foreman/lib/satellite/version.rb
72
73 b. Check the version of package foreman, candlepin and katello, E.g.
74 - https://access.redhat.com/articles/1343683
75
76 Sat 6.0.8 Sat 6.1.10 Sat 6.1.11
77 foreman 1.6.0.53 1.7.2.61 1.7.2.62
78 candlepin 0.9.23 0.9.49.16 0.9.49.19
79 katello 1.5.0 2.2.0 2.2.0
80
81 2. For Satellite 6.2 and newer:
82
83 Check the version of satellite package directly:
84 - https://access.redhat.com/solutions/1392633
85
86 Sat 6.0.x Sat 6.1.x Sat 6.2.x
87 satellite - - 6.2.x
88
89 3. For Satellite 5.x
90 - https://access.redhat.com/solutions/1224043
91 NOTE: Because of satellite-branding is not deployed in Satellite
92 5.0~5.2, and satellite-schema can also be used for checking
93 the version, here checked satellite-schema instead of
94 satellite-branding.
95
96 Check the version of package satellite-schema directly:
97
98 Sat 5.0~5.2 Sat 5.3 ~
99 rhn-satellite-schema ok -
100 satellite-schema - ok
101
102 Attributes:
103 full(str): the full version format like `version-release`.
104 version(str): the satellite version do not includes `release`.
105 release(str): the `release` string in the version.
106 major(int): the major version.
107 minor(int): the minor version.
108
109 Raises:
110 SkipComponent: When it's not a Satellite machine or the Satellite
111 version cannot be determined according to current information.
112
113 Examples:
114 >>> sat_ver.full == 'satellite-6.2.0.11-1.el7sat'
115 True
116 >>> sat_ver.major
117 6
118 >>> sat_ver.minor
119 2
120 >>> sat_ver.version
121 '6.2.0.11'
122 >>> sat_ver.release
123 '1.el7sat'
124 """
125 def __init__(self, rpms, sat6_ver):
126 self.full = None
127 self.version = None
128 self.release = None
129 self.major = None
130 self.minor = None
131
132 # For Satellite 6.1, if satellite_version/version.rb is available:
133 if sat6_ver:
134 # no 'release' in this case, but more accurate
135 self.full = sat6_ver.full
136 self.version = sat6_ver.version
137 self.major = sat6_ver.major
138 self.minor = sat6_ver.minor
139 else:
140 # For Satellite 6.2 and newer, check the satellite package directly
141 sat62_pkg = rpms.get_max('satellite')
142 if sat62_pkg:
143 self.full = sat62_pkg.package
144 self.version = sat62_pkg.version
145 self.release = sat62_pkg.release
146 self.major, self.minor = _parse_sat_version(self.version)
147 else:
148 # For Satellite 6.0/6.1, check the version of:
149 # - foreman, candlepin and katello
150 fman = rpms.get_max('foreman')
151 cndp = rpms.get_max('candlepin')
152 ktlo = rpms.get_max('katello')
153 if fman and cndp and ktlo:
154 for sat_ver, map_ver in sat6_ver_map.items():
155 if all(pkg.version.startswith(mv) for pkg, mv in zip([fman, cndp, ktlo], map_ver)):
156 # no 'release' in this situation
157 self.major, self.minor = _parse_sat_version(sat_ver)
158 self.full = self.version = sat_ver
159 else:
160 # For Satellite 5.x
161 sat5_pkg = rpms.get_max('satellite-schema') or rpms.get_max('rhn-satellite-schema')
162 if sat5_pkg:
163 self.full = sat5_pkg.package
164 self.version = sat5_pkg.version
165 self.release = sat5_pkg.release
166 self.major, self.minor = _parse_sat_version(self.version)
167 if not self.full:
168 raise SkipComponent("Not a Satellite machine or unable to determine Satellite version")
169
170
171 @combiner(InstalledRpms)
172 class CapsuleVersion(object):
173 """
174 Check the parser
175 :class:`insights.parsers.installed_rpms.InstalledRpms` for satellite capsule
176 version information.
177
178 .. note::
179 ONLY Satellite Capsule 6.2 and newer are supported.
180
181 Below is the logic to determine the satellite version::
182
183 Check the version of satellite/satellite-capsule directly:
184 - https://access.redhat.com/solutions/1392633
185
186 Sat 6.0.x Sat 6.1.x Sat 6.2.x
187 satellite-capsule - - 6.2.x
188
189 Attributes:
190 full(str): the full version format like `version-release`.
191 version(str): the satellite version do not includes `release`.
192 release(str): the `release` string in the version.
193 major(int): the major version.
194 minor(int): the minor version.
195
196 Raises:
197 SkipComponent: When it's not a Satellite Capsule machine or the
198 Satellite Capsule version cannot be determined according to
199 current information.
200
201 Examples:
202 >>> cap_ver.full == 'satellite-capsule-6.2.0.11-1.el7sat'
203 True
204 >>> cap_ver.major
205 6
206 >>> cap_ver.minor
207 2
208 >>> cap_ver.version
209 '6.2.0.11'
210 >>> cap_ver.release
211 '1.el7sat'
212 """
213 def __init__(self, rpms):
214 self.full = None
215 self.version = None
216 self.release = None
217 self.major = None
218 self.minor = None
219
220 # For Capsule, ONLY 6.2 and newer are supported
221 sat62_pkg = rpms.get_max('satellite-capsule')
222 if sat62_pkg:
223 self.full = sat62_pkg.package
224 self.version = sat62_pkg.version
225 self.release = sat62_pkg.release
226 self.major, self.minor = _parse_sat_version(self.version)
227 else:
228 raise SkipComponent("Not a Satellite Capsule machine or unable to determine the version")
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/insights/combiners/satellite_version.py b/insights/combiners/satellite_version.py
--- a/insights/combiners/satellite_version.py
+++ b/insights/combiners/satellite_version.py
@@ -219,7 +219,8 @@
# For Capsule, ONLY 6.2 and newer are supported
sat62_pkg = rpms.get_max('satellite-capsule')
- if sat62_pkg:
+ # foreman package should not be there on Capsule Server
+ if sat62_pkg and 'foreman' not in rpms:
self.full = sat62_pkg.package
self.version = sat62_pkg.version
self.release = sat62_pkg.release
|
{"golden_diff": "diff --git a/insights/combiners/satellite_version.py b/insights/combiners/satellite_version.py\n--- a/insights/combiners/satellite_version.py\n+++ b/insights/combiners/satellite_version.py\n@@ -219,7 +219,8 @@\n \n # For Capsule, ONLY 6.2 and newer are supported\n sat62_pkg = rpms.get_max('satellite-capsule')\n- if sat62_pkg:\n+ # foreman package should not be there on Capsule Server\n+ if sat62_pkg and 'foreman' not in rpms:\n self.full = sat62_pkg.package\n self.version = sat62_pkg.version\n self.release = sat62_pkg.release\n", "issue": "Double Check satellite_version.CapsuleVersion Combiner\nCapsuleVersion Combiner currently relies on satellite-capsule package.\r\n\r\nsatellite-capsule package can be installed on satellite server t0o and plugin rules may use the combiner to wrongly identify satellite server as capsule.\r\n\r\n\"foreman\" package would not be there on Capsule server and hence can be added as a check. \n", "before_files": [{"content": "\"\"\"\nSatellite Version\n=================\n\nThe following modules are included:\n\nSatelliteVersion - Version of Satellite Server\n----------------------------------------------\nCombiner to get Satellite Server version information.\n\nCapsuleVersion - Version of Satellite Capsule (>=6.2)\n-----------------------------------------------------\nCombiner to get Satellite Capsule version information. ONLY Satellite Capsule\n6.2 and newer are supported.\n\n\n\"\"\"\n\nfrom insights import combiner, SkipComponent\nfrom insights.parsers.satellite_version import Satellite6Version as Sat6Ver\nfrom insights.parsers.installed_rpms import InstalledRpms\n\n\n# NOTE:\n# The following table only tracks 6.0.x and 6.1.x.\n# See https://access.redhat.com/articles/1343683\n# But, there are some mistakes in the KCS, the versions in below map\n# are corrected according to the corresponding ERRATA pages.\n#\n# Update: Thu Nov 14 10:51:19 CST 2019\n#\nsat6_ver_map = {\n # Sat foreman candlepin katello\n '6.0.8': ('1.6.0.53', '0.9.23', '1.5.0'),\n '6.1.1': ('1.7.2.33', '0.9.49.3', '2.2.0.14'),\n '6.1.2': ('1.7.2.36', '0.9.49.6', '2.2.0.16'),\n '6.1.3': ('1.7.2.43', '0.9.49.8', '2.2.0.16'),\n '6.1.4': ('1.7.2.46', '0.9.49.9', '2.2.0.16'),\n '6.1.5': ('1.7.2.49', '0.9.49.9', '2.2.0.16'),\n '6.1.6': ('1.7.2.50', '0.9.49.9', '2.2.0.17'),\n '6.1.7': ('1.7.2.53', '0.9.49.11', '2.2.0.17'),\n '6.1.8': ('1.7.2.55', '0.9.49.12', '2.2.0.19'),\n '6.1.9': ('1.7.2.56', '0.9.49.12', '2.2.0.19'),\n '6.1.10': ('1.7.2.61', '0.9.49.16', '2.2.0.19'),\n '6.1.11': ('1.7.2.62', '0.9.49.19', '2.2.0.19'),\n '6.1.12': ('1.7.2.63', '0.9.49.23', '2.2.0.19'),\n}\n\n\ndef _parse_sat_version(version):\n ver_sp = version.split(\".\") if version else []\n major = int(ver_sp[0]) if ver_sp and ver_sp[0].isdigit() else None\n minor = int(ver_sp[1]) if len(ver_sp) > 1 and ver_sp[1].isdigit() else None\n return [major, minor]\n\n\n@combiner(InstalledRpms, optional=[Sat6Ver])\nclass SatelliteVersion(object):\n \"\"\"\n Check the parsers\n :class:`insights.parsers.satellite_version.Satellite6Version` and\n :class:`insights.parsers.installed_rpms.InstalledRpms` for satellite version\n information.\n\n Below is the logic to determine the satellite version::\n\n 1. For Satellite 6.1:\n\n a. Check the version information in below files at first\n - https://access.redhat.com/solutions/1392633\n File: /usr/share/foreman/lib/satellite/version.rb\n\n b. 
Check the version of package foreman, candlepin and katello, E.g.\n - https://access.redhat.com/articles/1343683\n\n Sat 6.0.8 Sat 6.1.10 Sat 6.1.11\n foreman 1.6.0.53 1.7.2.61 1.7.2.62\n candlepin 0.9.23 0.9.49.16 0.9.49.19\n katello 1.5.0 2.2.0 2.2.0\n\n 2. For Satellite 6.2 and newer:\n\n Check the version of satellite package directly:\n - https://access.redhat.com/solutions/1392633\n\n Sat 6.0.x Sat 6.1.x Sat 6.2.x\n satellite - - 6.2.x\n\n 3. For Satellite 5.x\n - https://access.redhat.com/solutions/1224043\n NOTE: Because of satellite-branding is not deployed in Satellite\n 5.0~5.2, and satellite-schema can also be used for checking\n the version, here checked satellite-schema instead of\n satellite-branding.\n\n Check the version of package satellite-schema directly:\n\n Sat 5.0~5.2 Sat 5.3 ~\n rhn-satellite-schema ok -\n satellite-schema - ok\n\n Attributes:\n full(str): the full version format like `version-release`.\n version(str): the satellite version do not includes `release`.\n release(str): the `release` string in the version.\n major(int): the major version.\n minor(int): the minor version.\n\n Raises:\n SkipComponent: When it's not a Satellite machine or the Satellite\n version cannot be determined according to current information.\n\n Examples:\n >>> sat_ver.full == 'satellite-6.2.0.11-1.el7sat'\n True\n >>> sat_ver.major\n 6\n >>> sat_ver.minor\n 2\n >>> sat_ver.version\n '6.2.0.11'\n >>> sat_ver.release\n '1.el7sat'\n \"\"\"\n def __init__(self, rpms, sat6_ver):\n self.full = None\n self.version = None\n self.release = None\n self.major = None\n self.minor = None\n\n # For Satellite 6.1, if satellite_version/version.rb is available:\n if sat6_ver:\n # no 'release' in this case, but more accurate\n self.full = sat6_ver.full\n self.version = sat6_ver.version\n self.major = sat6_ver.major\n self.minor = sat6_ver.minor\n else:\n # For Satellite 6.2 and newer, check the satellite package directly\n sat62_pkg = rpms.get_max('satellite')\n if sat62_pkg:\n self.full = sat62_pkg.package\n self.version = sat62_pkg.version\n self.release = sat62_pkg.release\n self.major, self.minor = _parse_sat_version(self.version)\n else:\n # For Satellite 6.0/6.1, check the version of:\n # - foreman, candlepin and katello\n fman = rpms.get_max('foreman')\n cndp = rpms.get_max('candlepin')\n ktlo = rpms.get_max('katello')\n if fman and cndp and ktlo:\n for sat_ver, map_ver in sat6_ver_map.items():\n if all(pkg.version.startswith(mv) for pkg, mv in zip([fman, cndp, ktlo], map_ver)):\n # no 'release' in this situation\n self.major, self.minor = _parse_sat_version(sat_ver)\n self.full = self.version = sat_ver\n else:\n # For Satellite 5.x\n sat5_pkg = rpms.get_max('satellite-schema') or rpms.get_max('rhn-satellite-schema')\n if sat5_pkg:\n self.full = sat5_pkg.package\n self.version = sat5_pkg.version\n self.release = sat5_pkg.release\n self.major, self.minor = _parse_sat_version(self.version)\n if not self.full:\n raise SkipComponent(\"Not a Satellite machine or unable to determine Satellite version\")\n\n\n@combiner(InstalledRpms)\nclass CapsuleVersion(object):\n \"\"\"\n Check the parser\n :class:`insights.parsers.installed_rpms.InstalledRpms` for satellite capsule\n version information.\n\n .. 
note::\n ONLY Satellite Capsule 6.2 and newer are supported.\n\n Below is the logic to determine the satellite version::\n\n Check the version of satellite/satellite-capsule directly:\n - https://access.redhat.com/solutions/1392633\n\n Sat 6.0.x Sat 6.1.x Sat 6.2.x\n satellite-capsule - - 6.2.x\n\n Attributes:\n full(str): the full version format like `version-release`.\n version(str): the satellite version do not includes `release`.\n release(str): the `release` string in the version.\n major(int): the major version.\n minor(int): the minor version.\n\n Raises:\n SkipComponent: When it's not a Satellite Capsule machine or the\n Satellite Capsule version cannot be determined according to\n current information.\n\n Examples:\n >>> cap_ver.full == 'satellite-capsule-6.2.0.11-1.el7sat'\n True\n >>> cap_ver.major\n 6\n >>> cap_ver.minor\n 2\n >>> cap_ver.version\n '6.2.0.11'\n >>> cap_ver.release\n '1.el7sat'\n \"\"\"\n def __init__(self, rpms):\n self.full = None\n self.version = None\n self.release = None\n self.major = None\n self.minor = None\n\n # For Capsule, ONLY 6.2 and newer are supported\n sat62_pkg = rpms.get_max('satellite-capsule')\n if sat62_pkg:\n self.full = sat62_pkg.package\n self.version = sat62_pkg.version\n self.release = sat62_pkg.release\n self.major, self.minor = _parse_sat_version(self.version)\n else:\n raise SkipComponent(\"Not a Satellite Capsule machine or unable to determine the version\")\n", "path": "insights/combiners/satellite_version.py"}], "after_files": [{"content": "\"\"\"\nSatellite Version\n=================\n\nThe following modules are included:\n\nSatelliteVersion - Version of Satellite Server\n----------------------------------------------\nCombiner to get Satellite Server version information.\n\nCapsuleVersion - Version of Satellite Capsule (>=6.2)\n-----------------------------------------------------\nCombiner to get Satellite Capsule version information. 
ONLY Satellite Capsule\n6.2 and newer are supported.\n\n\n\"\"\"\n\nfrom insights import combiner, SkipComponent\nfrom insights.parsers.satellite_version import Satellite6Version as Sat6Ver\nfrom insights.parsers.installed_rpms import InstalledRpms\n\n\n# NOTE:\n# The following table only tracks 6.0.x and 6.1.x.\n# See https://access.redhat.com/articles/1343683\n# But, there are some mistakes in the KCS, the versions in below map\n# are corrected according to the corresponding ERRATA pages.\n#\n# Update: Thu Nov 14 10:51:19 CST 2019\n#\nsat6_ver_map = {\n # Sat foreman candlepin katello\n '6.0.8': ('1.6.0.53', '0.9.23', '1.5.0'),\n '6.1.1': ('1.7.2.33', '0.9.49.3', '2.2.0.14'),\n '6.1.2': ('1.7.2.36', '0.9.49.6', '2.2.0.16'),\n '6.1.3': ('1.7.2.43', '0.9.49.8', '2.2.0.16'),\n '6.1.4': ('1.7.2.46', '0.9.49.9', '2.2.0.16'),\n '6.1.5': ('1.7.2.49', '0.9.49.9', '2.2.0.16'),\n '6.1.6': ('1.7.2.50', '0.9.49.9', '2.2.0.17'),\n '6.1.7': ('1.7.2.53', '0.9.49.11', '2.2.0.17'),\n '6.1.8': ('1.7.2.55', '0.9.49.12', '2.2.0.19'),\n '6.1.9': ('1.7.2.56', '0.9.49.12', '2.2.0.19'),\n '6.1.10': ('1.7.2.61', '0.9.49.16', '2.2.0.19'),\n '6.1.11': ('1.7.2.62', '0.9.49.19', '2.2.0.19'),\n '6.1.12': ('1.7.2.63', '0.9.49.23', '2.2.0.19'),\n}\n\n\ndef _parse_sat_version(version):\n ver_sp = version.split(\".\") if version else []\n major = int(ver_sp[0]) if ver_sp and ver_sp[0].isdigit() else None\n minor = int(ver_sp[1]) if len(ver_sp) > 1 and ver_sp[1].isdigit() else None\n return [major, minor]\n\n\n@combiner(InstalledRpms, optional=[Sat6Ver])\nclass SatelliteVersion(object):\n \"\"\"\n Check the parsers\n :class:`insights.parsers.satellite_version.Satellite6Version` and\n :class:`insights.parsers.installed_rpms.InstalledRpms` for satellite version\n information.\n\n Below is the logic to determine the satellite version::\n\n 1. For Satellite 6.1:\n\n a. Check the version information in below files at first\n - https://access.redhat.com/solutions/1392633\n File: /usr/share/foreman/lib/satellite/version.rb\n\n b. Check the version of package foreman, candlepin and katello, E.g.\n - https://access.redhat.com/articles/1343683\n\n Sat 6.0.8 Sat 6.1.10 Sat 6.1.11\n foreman 1.6.0.53 1.7.2.61 1.7.2.62\n candlepin 0.9.23 0.9.49.16 0.9.49.19\n katello 1.5.0 2.2.0 2.2.0\n\n 2. For Satellite 6.2 and newer:\n\n Check the version of satellite package directly:\n - https://access.redhat.com/solutions/1392633\n\n Sat 6.0.x Sat 6.1.x Sat 6.2.x\n satellite - - 6.2.x\n\n 3. 
For Satellite 5.x\n - https://access.redhat.com/solutions/1224043\n NOTE: Because of satellite-branding is not deployed in Satellite\n 5.0~5.2, and satellite-schema can also be used for checking\n the version, here checked satellite-schema instead of\n satellite-branding.\n\n Check the version of package satellite-schema directly:\n\n Sat 5.0~5.2 Sat 5.3 ~\n rhn-satellite-schema ok -\n satellite-schema - ok\n\n Attributes:\n full(str): the full version format like `version-release`.\n version(str): the satellite version do not includes `release`.\n release(str): the `release` string in the version.\n major(int): the major version.\n minor(int): the minor version.\n\n Raises:\n SkipComponent: When it's not a Satellite machine or the Satellite\n version cannot be determined according to current information.\n\n Examples:\n >>> sat_ver.full == 'satellite-6.2.0.11-1.el7sat'\n True\n >>> sat_ver.major\n 6\n >>> sat_ver.minor\n 2\n >>> sat_ver.version\n '6.2.0.11'\n >>> sat_ver.release\n '1.el7sat'\n \"\"\"\n def __init__(self, rpms, sat6_ver):\n self.full = None\n self.version = None\n self.release = None\n self.major = None\n self.minor = None\n\n # For Satellite 6.1, if satellite_version/version.rb is available:\n if sat6_ver:\n # no 'release' in this case, but more accurate\n self.full = sat6_ver.full\n self.version = sat6_ver.version\n self.major = sat6_ver.major\n self.minor = sat6_ver.minor\n else:\n # For Satellite 6.2 and newer, check the satellite package directly\n sat62_pkg = rpms.get_max('satellite')\n if sat62_pkg:\n self.full = sat62_pkg.package\n self.version = sat62_pkg.version\n self.release = sat62_pkg.release\n self.major, self.minor = _parse_sat_version(self.version)\n else:\n # For Satellite 6.0/6.1, check the version of:\n # - foreman, candlepin and katello\n fman = rpms.get_max('foreman')\n cndp = rpms.get_max('candlepin')\n ktlo = rpms.get_max('katello')\n if fman and cndp and ktlo:\n for sat_ver, map_ver in sat6_ver_map.items():\n if all(pkg.version.startswith(mv) for pkg, mv in zip([fman, cndp, ktlo], map_ver)):\n # no 'release' in this situation\n self.major, self.minor = _parse_sat_version(sat_ver)\n self.full = self.version = sat_ver\n else:\n # For Satellite 5.x\n sat5_pkg = rpms.get_max('satellite-schema') or rpms.get_max('rhn-satellite-schema')\n if sat5_pkg:\n self.full = sat5_pkg.package\n self.version = sat5_pkg.version\n self.release = sat5_pkg.release\n self.major, self.minor = _parse_sat_version(self.version)\n if not self.full:\n raise SkipComponent(\"Not a Satellite machine or unable to determine Satellite version\")\n\n\n@combiner(InstalledRpms)\nclass CapsuleVersion(object):\n \"\"\"\n Check the parser\n :class:`insights.parsers.installed_rpms.InstalledRpms` for satellite capsule\n version information.\n\n .. 
note::\n ONLY Satellite Capsule 6.2 and newer are supported.\n\n Below is the logic to determine the satellite version::\n\n Check the version of satellite/satellite-capsule directly:\n - https://access.redhat.com/solutions/1392633\n\n Sat 6.0.x Sat 6.1.x Sat 6.2.x\n satellite-capsule - - 6.2.x\n\n Attributes:\n full(str): the full version format like `version-release`.\n version(str): the satellite version do not includes `release`.\n release(str): the `release` string in the version.\n major(int): the major version.\n minor(int): the minor version.\n\n Raises:\n SkipComponent: When it's not a Satellite Capsule machine or the\n Satellite Capsule version cannot be determined according to\n current information.\n\n Examples:\n >>> cap_ver.full == 'satellite-capsule-6.2.0.11-1.el7sat'\n True\n >>> cap_ver.major\n 6\n >>> cap_ver.minor\n 2\n >>> cap_ver.version\n '6.2.0.11'\n >>> cap_ver.release\n '1.el7sat'\n \"\"\"\n def __init__(self, rpms):\n self.full = None\n self.version = None\n self.release = None\n self.major = None\n self.minor = None\n\n # For Capsule, ONLY 6.2 and newer are supported\n sat62_pkg = rpms.get_max('satellite-capsule')\n # foreman package should not be there on Capsule Server\n if sat62_pkg and 'foreman' not in rpms:\n self.full = sat62_pkg.package\n self.version = sat62_pkg.version\n self.release = sat62_pkg.release\n self.major, self.minor = _parse_sat_version(self.version)\n else:\n raise SkipComponent(\"Not a Satellite Capsule machine or unable to determine the version\")\n", "path": "insights/combiners/satellite_version.py"}]}
| 3,374 | 174 |
gh_patches_debug_34977
|
rasdani/github-patches
|
git_diff
|
avocado-framework__avocado-5196
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow the deployment and use of development versions of Avocado
**Is your feature request related to a problem? Please describe.**
Epic issue https://github.com/avocado-framework/avocado/issues/4934 covers aspects such as creating and publishing (AFAICT) packages for every Avocado release, for every Python version, and how they will be deployed and installed on `DeploymentSpawner`s.
But this doesn't address the development-time needs, where unreleased code may be needed both on the job side (say, running out of a Git checkout) and in the isolated spawner environment.
**Describe the solution you'd like**
There should be a way to signal a preference for given wheels to be used, instead of the latest release ones.
**Describe alternatives you've considered**
Building the wheels is probably going to be a separate step (such as the current `make wheel`), and it will probably be necessary to refer to them by a local (non-HTTP(S)) URI. Bonus points for an all-in-one `--mirror-this-repo-as-a-wheel-and-deploy-it` kind of feature, but I think this will be too complex.
--- END ISSUE ---
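One way to express the preference described above, sketched against the `settings.register_option()` pattern visible in `PodmanSpawnerInit.initialize()` below. The option name `spawner.podman.avocado_spawner_egg` and its wiring are assumptions for illustration, not the actual implementation:

```python
# Sketch (hypothetical option): let a locally built development egg/wheel
# override the released one that get_eggs_paths() downloads.
from avocado.core.settings import settings

def register_dev_egg_option():
    settings.register_option(
        section='spawner.podman',
        key='avocado_spawner_egg',   # hypothetical key name
        help_msg=('Path (or file:// URI) of a locally built Avocado egg to '
                  'deploy into the container instead of the released egg'),
        default=None)
```

Under this assumption, `get_eggs_paths()` could check that option first and, when set, hand the local file to the existing `copy_to_container()` call instead of fetching the release egg from GitHub.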
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avocado/plugins/spawners/podman.py`
Content:
```
1 import asyncio
2 import json
3 import logging
4 import os
5 import subprocess
6
7 from avocado.core.plugin_interfaces import CLI, DeploymentSpawner, Init
8 from avocado.core.settings import settings
9 from avocado.core.spawners.common import SpawnerMixin, SpawnMethod
10 from avocado.core.version import VERSION
11 from avocado.utils import distro
12 from avocado.utils.asset import Asset
13 from avocado.utils.podman import Podman, PodmanException
14
15 LOG = logging.getLogger(__name__)
16
17
18 ENTRY_POINT_CMD = "/tmp/avocado-runner"
19
20
21 class PodmanSpawnerInit(Init):
22
23 description = 'Podman (container) based spawner initialization'
24
25 def initialize(self):
26 section = 'spawner.podman'
27
28 help_msg = 'Path to the podman binary'
29 settings.register_option(
30 section=section,
31 key='bin',
32 help_msg=help_msg,
33 default='/usr/bin/podman')
34
35 this_distro = distro.detect()
36 if this_distro != distro.UNKNOWN_DISTRO:
37 default_distro = '{0}:{1}'.format(this_distro.name,
38 this_distro.version)
39 else:
40 default_distro = 'fedora:latest'
41 help_msg = ('Image name to use when creating the container. '
42 'The first default choice is a container image '
43 'matching the current OS. If unable to detect, '
44 'default becomes the latest Fedora release. Default '
45 'on this system: {0}'.format(default_distro))
46 settings.register_option(
47 section=section,
48 key='image',
49 help_msg=help_msg,
50 default=default_distro)
51
52
53 class PodmanCLI(CLI):
54
55 name = 'podman'
56 description = 'podman spawner command line options for "run"'
57
58 def configure(self, parser):
59 super().configure(parser)
60 parser = parser.subcommands.choices.get('run', None)
61 if parser is None:
62 return
63
64 parser = parser.add_argument_group('podman spawner specific options')
65 settings.add_argparser_to_option(namespace='spawner.podman.bin',
66 parser=parser,
67 long_arg='--spawner-podman-bin',
68 metavar='PODMAN_BIN')
69
70 settings.add_argparser_to_option(namespace='spawner.podman.image',
71 parser=parser,
72 long_arg='--spawner-podman-image',
73 metavar='CONTAINER_IMAGE')
74
75 def run(self, config):
76 pass
77
78
79 class PodmanSpawner(DeploymentSpawner, SpawnerMixin):
80
81 description = 'Podman (container) based spawner'
82 METHODS = [SpawnMethod.STANDALONE_EXECUTABLE]
83
84 _PYTHON_VERSIONS_CACHE = {}
85
86 def is_task_alive(self, runtime_task):
87 if runtime_task.spawner_handle is None:
88 return False
89 podman_bin = self.config.get('spawner.podman.bin')
90 cmd = [podman_bin, "ps", "--all", "--format={{.State}}",
91 "--filter=id=%s" % runtime_task.spawner_handle]
92 process = subprocess.Popen(cmd,
93 stdin=subprocess.DEVNULL,
94 stdout=subprocess.PIPE,
95 stderr=subprocess.DEVNULL)
96 out, _ = process.communicate()
97 # FIXME: check how podman 2.x is reporting valid "OK" states
98 return out.startswith(b'Up ')
99
100 def _fetch_asset(self, url):
101 cachedirs = self.config.get('datadir.paths.cache_dirs')
102 asset = Asset(url, cache_dirs=cachedirs)
103 return asset.fetch()
104
105 def get_eggs_paths(self, py_major, py_minor):
106 """Return the basic eggs needed to bootstrap Avocado.
107
108 This will return a tuple with the current location and where this
109 should be deployed.
110 """
111 result = []
112 # Setuptools
113 # For now let's pin to setuptools 59.2.
114 # TODO: Automatically get latest setuptools version.
115 eggs = [f"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg",
116 f"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg"]
117 for url in eggs:
118 path = self._fetch_asset(url)
119 to = os.path.join('/tmp/', os.path.basename(path))
120 result.append((path, to))
121 return result
122
123 @property
124 async def python_version(self):
125 image = self.config.get('spawner.podman.image')
126 if image not in self._PYTHON_VERSIONS_CACHE:
127 if not self.podman:
128 msg = "Cannot get Python version: self.podman not defined."
129 LOG.debug(msg)
130 return None, None, None
131 result = await self.podman.get_python_version(image)
132 self._PYTHON_VERSIONS_CACHE[image] = result
133 return self._PYTHON_VERSIONS_CACHE[image]
134
135 async def deploy_artifacts(self):
136 pass
137
138 async def deploy_avocado(self, where):
139 # Deploy all the eggs to container inside /tmp/
140 major, minor, _ = await self.python_version
141 eggs = self.get_eggs_paths(major, minor)
142
143 for egg, to in eggs:
144 await self.podman.copy_to_container(where, egg, to)
145
146 async def _create_container_for_task(self, runtime_task, env_args,
147 test_output=None):
148 mount_status_server_socket = False
149 mounted_status_server_socket = '/tmp/.status_server.sock'
150 status_server_uri = runtime_task.task.status_services[0].uri
151 if ':' not in status_server_uri:
152 # a unix domain socket is being used
153 mount_status_server_socket = True
154 runtime_task.task.status_services[0].uri = mounted_status_server_socket
155
156 _, _, python_binary = await self.python_version
157 entry_point_args = [python_binary,
158 '-m',
159 'avocado.core.nrunner',
160 'task-run']
161
162 task = runtime_task.task
163 entry_point_args.extend(task.get_command_args())
164 entry_point = json.dumps(entry_point_args)
165 entry_point_arg = "--entrypoint=" + entry_point
166
167 if mount_status_server_socket:
168 status_server_opts = (
169 "--privileged",
170 "-v", "%s:%s" % (status_server_uri,
171 mounted_status_server_socket)
172 )
173 else:
174 status_server_opts = ("--net=host", )
175
176 output_opts = ()
177 if test_output:
178 podman_output = runtime_task.task.runnable.output_dir
179 output_opts = ("-v", "%s:%s" % (test_output,
180 os.path.expanduser(podman_output)))
181
182 image = self.config.get('spawner.podman.image')
183
184 envs = [f"-e={k}={v}" for k, v in env_args.items()]
185 try:
186 # pylint: disable=W0201
187 _, stdout, _ = await self.podman.execute("create",
188 *status_server_opts,
189 *output_opts,
190 entry_point_arg,
191 *envs,
192 image)
193 except PodmanException as ex:
194 msg = f"Could not create podman container: {ex}"
195 runtime_task.status = msg
196 return False
197
198 return stdout.decode().strip()
199
200 async def spawn_task(self, runtime_task):
201 self.create_task_output_dir(runtime_task)
202 podman_bin = self.config.get('spawner.podman.bin')
203 try:
204 # pylint: disable=W0201
205 self.podman = Podman(podman_bin)
206 except PodmanException as ex:
207 runtime_task.status = str(ex)
208 return False
209
210 major, minor, _ = await self.python_version
211 # Return only the "to" location
212 eggs = self.get_eggs_paths(major, minor)
213 destination_eggs = ":".join(map(lambda egg: str(egg[1]), eggs))
214 env_args = {'PYTHONPATH': destination_eggs}
215 output_dir_path = self.task_output_dir(runtime_task)
216 container_id = await self._create_container_for_task(runtime_task,
217 env_args,
218 output_dir_path)
219
220 runtime_task.spawner_handle = container_id
221
222 await self.deploy_avocado(container_id)
223
224 try:
225 # pylint: disable=W0201
226 returncode, _, _ = await self.podman.start(container_id)
227 except PodmanException as ex:
228 msg = f"Could not start container: {ex}"
229 runtime_task.status = msg
230 LOG.error(msg)
231 return False
232
233 return returncode == 0
234
235 def create_task_output_dir(self, runtime_task):
236 output_dir_path = self.task_output_dir(runtime_task)
237 output_podman_path = '~/avocado/job-results/spawner/task'
238
239 os.makedirs(output_dir_path, exist_ok=True)
240 runtime_task.task.setup_output_dir(output_podman_path)
241
242 async def wait_task(self, runtime_task):
243 while True:
244 if not self.is_task_alive(runtime_task):
245 return
246 await asyncio.sleep(0.1)
247
248 @staticmethod
249 async def check_task_requirements(runtime_task):
250 """Check the runtime task requirements needed to be able to run"""
251 # right now, limit the check to the runner availability.
252 if runtime_task.task.runnable.pick_runner_command() is None:
253 return False
254 return True
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/avocado/plugins/spawners/podman.py b/avocado/plugins/spawners/podman.py
--- a/avocado/plugins/spawners/podman.py
+++ b/avocado/plugins/spawners/podman.py
@@ -49,6 +49,16 @@
help_msg=help_msg,
default=default_distro)
+ help_msg = ('Avocado egg path to be used during initial bootstrap '
+ 'of avocado inside the isolated environment. By default, '
+ 'Avocado will try to download (or get from cache) an '
+ 'egg from its repository.')
+
+ settings.register_option(section=section,
+ key='avocado_spawner_egg',
+ help_msg=help_msg,
+ default=None)
+
class PodmanCLI(CLI):
@@ -72,6 +82,13 @@
long_arg='--spawner-podman-image',
metavar='CONTAINER_IMAGE')
+ namespace = 'spawner.podman.avocado_spawner_egg'
+ long_arg = '--spawner-podman-avocado-egg'
+ settings.add_argparser_to_option(namespace=namespace,
+ parser=parser,
+ long_arg=long_arg,
+ metavar='AVOCADO_EGG')
+
def run(self, config):
pass
@@ -112,8 +129,14 @@
# Setuptools
# For now let's pin to setuptools 59.2.
# TODO: Automatically get latest setuptools version.
- eggs = [f"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg",
- f"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg"]
+ eggs = [f"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg"]
+ local_egg = self.config.get('spawner.podman.avocado_spawner_egg')
+ if local_egg:
+ eggs.append(local_egg)
+ else:
+ remote_egg = f"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg"
+ eggs.append(remote_egg)
+
for url in eggs:
path = self._fetch_asset(url)
to = os.path.join('/tmp/', os.path.basename(path))
|
{"golden_diff": "diff --git a/avocado/plugins/spawners/podman.py b/avocado/plugins/spawners/podman.py\n--- a/avocado/plugins/spawners/podman.py\n+++ b/avocado/plugins/spawners/podman.py\n@@ -49,6 +49,16 @@\n help_msg=help_msg,\n default=default_distro)\n \n+ help_msg = ('Avocado egg path to be used during initial bootstrap '\n+ 'of avocado inside the isolated environment. By default, '\n+ 'Avocado will try to download (or get from cache) an '\n+ 'egg from its repository.')\n+\n+ settings.register_option(section=section,\n+ key='avocado_spawner_egg',\n+ help_msg=help_msg,\n+ default=None)\n+\n \n class PodmanCLI(CLI):\n \n@@ -72,6 +82,13 @@\n long_arg='--spawner-podman-image',\n metavar='CONTAINER_IMAGE')\n \n+ namespace = 'spawner.podman.avocado_spawner_egg'\n+ long_arg = '--spawner-podman-avocado-egg'\n+ settings.add_argparser_to_option(namespace=namespace,\n+ parser=parser,\n+ long_arg=long_arg,\n+ metavar='AVOCADO_EGG')\n+\n def run(self, config):\n pass\n \n@@ -112,8 +129,14 @@\n # Setuptools\n # For now let's pin to setuptools 59.2.\n # TODO: Automatically get latest setuptools version.\n- eggs = [f\"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg\",\n- f\"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg\"]\n+ eggs = [f\"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg\"]\n+ local_egg = self.config.get('spawner.podman.avocado_spawner_egg')\n+ if local_egg:\n+ eggs.append(local_egg)\n+ else:\n+ remote_egg = f\"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg\"\n+ eggs.append(remote_egg)\n+\n for url in eggs:\n path = self._fetch_asset(url)\n to = os.path.join('/tmp/', os.path.basename(path))\n", "issue": "Allow the deployment and use of development versions of Avocado\n**Is your feature request related to a problem? Please describe.**\r\nEpic issue https://github.com/avocado-framework/avocado/issues/4934 covers aspects such as creating and publishing (AFAICT) packages for every Avocado release, for every Python version, and how they will be deployed and installed on `DeploymentSpawner`s.\r\n\r\nBut, this doesn't address the development time needs, where unreleased code may be needed in both the job side (say running out of a GIT checkout), and on the isolated spawner environment.\r\n\r\n**Describe the solution you'd like**\r\nThere should be a way to signal a preference for given wheels to be used, instead of the latest release ones. \r\n\r\n**Describe alternatives you've considered**\r\nBuilding the wheels is probably going to be a separate step (such as the current `make whee`, and It will probably be necessary to refer to them by a local (non HTTP(s)) uri. 
Bonus point for an all-in-one `--mirror-this-repo-as-a-wheel-and-deploy-it` kind of feature, but I think this will be too complex.\r\n\n", "before_files": [{"content": "import asyncio\nimport json\nimport logging\nimport os\nimport subprocess\n\nfrom avocado.core.plugin_interfaces import CLI, DeploymentSpawner, Init\nfrom avocado.core.settings import settings\nfrom avocado.core.spawners.common import SpawnerMixin, SpawnMethod\nfrom avocado.core.version import VERSION\nfrom avocado.utils import distro\nfrom avocado.utils.asset import Asset\nfrom avocado.utils.podman import Podman, PodmanException\n\nLOG = logging.getLogger(__name__)\n\n\nENTRY_POINT_CMD = \"/tmp/avocado-runner\"\n\n\nclass PodmanSpawnerInit(Init):\n\n description = 'Podman (container) based spawner initialization'\n\n def initialize(self):\n section = 'spawner.podman'\n\n help_msg = 'Path to the podman binary'\n settings.register_option(\n section=section,\n key='bin',\n help_msg=help_msg,\n default='/usr/bin/podman')\n\n this_distro = distro.detect()\n if this_distro != distro.UNKNOWN_DISTRO:\n default_distro = '{0}:{1}'.format(this_distro.name,\n this_distro.version)\n else:\n default_distro = 'fedora:latest'\n help_msg = ('Image name to use when creating the container. '\n 'The first default choice is a container image '\n 'matching the current OS. If unable to detect, '\n 'default becomes the latest Fedora release. Default '\n 'on this system: {0}'.format(default_distro))\n settings.register_option(\n section=section,\n key='image',\n help_msg=help_msg,\n default=default_distro)\n\n\nclass PodmanCLI(CLI):\n\n name = 'podman'\n description = 'podman spawner command line options for \"run\"'\n\n def configure(self, parser):\n super().configure(parser)\n parser = parser.subcommands.choices.get('run', None)\n if parser is None:\n return\n\n parser = parser.add_argument_group('podman spawner specific options')\n settings.add_argparser_to_option(namespace='spawner.podman.bin',\n parser=parser,\n long_arg='--spawner-podman-bin',\n metavar='PODMAN_BIN')\n\n settings.add_argparser_to_option(namespace='spawner.podman.image',\n parser=parser,\n long_arg='--spawner-podman-image',\n metavar='CONTAINER_IMAGE')\n\n def run(self, config):\n pass\n\n\nclass PodmanSpawner(DeploymentSpawner, SpawnerMixin):\n\n description = 'Podman (container) based spawner'\n METHODS = [SpawnMethod.STANDALONE_EXECUTABLE]\n\n _PYTHON_VERSIONS_CACHE = {}\n\n def is_task_alive(self, runtime_task):\n if runtime_task.spawner_handle is None:\n return False\n podman_bin = self.config.get('spawner.podman.bin')\n cmd = [podman_bin, \"ps\", \"--all\", \"--format={{.State}}\",\n \"--filter=id=%s\" % runtime_task.spawner_handle]\n process = subprocess.Popen(cmd,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n out, _ = process.communicate()\n # FIXME: check how podman 2.x is reporting valid \"OK\" states\n return out.startswith(b'Up ')\n\n def _fetch_asset(self, url):\n cachedirs = self.config.get('datadir.paths.cache_dirs')\n asset = Asset(url, cache_dirs=cachedirs)\n return asset.fetch()\n\n def get_eggs_paths(self, py_major, py_minor):\n \"\"\"Return the basic eggs needed to bootstrap Avocado.\n\n This will return a tuple with the current location and where this\n should be deployed.\n \"\"\"\n result = []\n # Setuptools\n # For now let's pin to setuptools 59.2.\n # TODO: Automatically get latest setuptools version.\n eggs = 
[f\"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg\",\n f\"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg\"]\n for url in eggs:\n path = self._fetch_asset(url)\n to = os.path.join('/tmp/', os.path.basename(path))\n result.append((path, to))\n return result\n\n @property\n async def python_version(self):\n image = self.config.get('spawner.podman.image')\n if image not in self._PYTHON_VERSIONS_CACHE:\n if not self.podman:\n msg = \"Cannot get Python version: self.podman not defined.\"\n LOG.debug(msg)\n return None, None, None\n result = await self.podman.get_python_version(image)\n self._PYTHON_VERSIONS_CACHE[image] = result\n return self._PYTHON_VERSIONS_CACHE[image]\n\n async def deploy_artifacts(self):\n pass\n\n async def deploy_avocado(self, where):\n # Deploy all the eggs to container inside /tmp/\n major, minor, _ = await self.python_version\n eggs = self.get_eggs_paths(major, minor)\n\n for egg, to in eggs:\n await self.podman.copy_to_container(where, egg, to)\n\n async def _create_container_for_task(self, runtime_task, env_args,\n test_output=None):\n mount_status_server_socket = False\n mounted_status_server_socket = '/tmp/.status_server.sock'\n status_server_uri = runtime_task.task.status_services[0].uri\n if ':' not in status_server_uri:\n # a unix domain socket is being used\n mount_status_server_socket = True\n runtime_task.task.status_services[0].uri = mounted_status_server_socket\n\n _, _, python_binary = await self.python_version\n entry_point_args = [python_binary,\n '-m',\n 'avocado.core.nrunner',\n 'task-run']\n\n task = runtime_task.task\n entry_point_args.extend(task.get_command_args())\n entry_point = json.dumps(entry_point_args)\n entry_point_arg = \"--entrypoint=\" + entry_point\n\n if mount_status_server_socket:\n status_server_opts = (\n \"--privileged\",\n \"-v\", \"%s:%s\" % (status_server_uri,\n mounted_status_server_socket)\n )\n else:\n status_server_opts = (\"--net=host\", )\n\n output_opts = ()\n if test_output:\n podman_output = runtime_task.task.runnable.output_dir\n output_opts = (\"-v\", \"%s:%s\" % (test_output,\n os.path.expanduser(podman_output)))\n\n image = self.config.get('spawner.podman.image')\n\n envs = [f\"-e={k}={v}\" for k, v in env_args.items()]\n try:\n # pylint: disable=W0201\n _, stdout, _ = await self.podman.execute(\"create\",\n *status_server_opts,\n *output_opts,\n entry_point_arg,\n *envs,\n image)\n except PodmanException as ex:\n msg = f\"Could not create podman container: {ex}\"\n runtime_task.status = msg\n return False\n\n return stdout.decode().strip()\n\n async def spawn_task(self, runtime_task):\n self.create_task_output_dir(runtime_task)\n podman_bin = self.config.get('spawner.podman.bin')\n try:\n # pylint: disable=W0201\n self.podman = Podman(podman_bin)\n except PodmanException as ex:\n runtime_task.status = str(ex)\n return False\n\n major, minor, _ = await self.python_version\n # Return only the \"to\" location\n eggs = self.get_eggs_paths(major, minor)\n destination_eggs = \":\".join(map(lambda egg: str(egg[1]), eggs))\n env_args = {'PYTHONPATH': destination_eggs}\n output_dir_path = self.task_output_dir(runtime_task)\n container_id = await self._create_container_for_task(runtime_task,\n env_args,\n output_dir_path)\n\n runtime_task.spawner_handle = container_id\n\n await self.deploy_avocado(container_id)\n\n try:\n # pylint: disable=W0201\n returncode, _, _ = await 
self.podman.start(container_id)\n except PodmanException as ex:\n msg = f\"Could not start container: {ex}\"\n runtime_task.status = msg\n LOG.error(msg)\n return False\n\n return returncode == 0\n\n def create_task_output_dir(self, runtime_task):\n output_dir_path = self.task_output_dir(runtime_task)\n output_podman_path = '~/avocado/job-results/spawner/task'\n\n os.makedirs(output_dir_path, exist_ok=True)\n runtime_task.task.setup_output_dir(output_podman_path)\n\n async def wait_task(self, runtime_task):\n while True:\n if not self.is_task_alive(runtime_task):\n return\n await asyncio.sleep(0.1)\n\n @staticmethod\n async def check_task_requirements(runtime_task):\n \"\"\"Check the runtime task requirements needed to be able to run\"\"\"\n # right now, limit the check to the runner availability.\n if runtime_task.task.runnable.pick_runner_command() is None:\n return False\n return True\n", "path": "avocado/plugins/spawners/podman.py"}], "after_files": [{"content": "import asyncio\nimport json\nimport logging\nimport os\nimport subprocess\n\nfrom avocado.core.plugin_interfaces import CLI, DeploymentSpawner, Init\nfrom avocado.core.settings import settings\nfrom avocado.core.spawners.common import SpawnerMixin, SpawnMethod\nfrom avocado.core.version import VERSION\nfrom avocado.utils import distro\nfrom avocado.utils.asset import Asset\nfrom avocado.utils.podman import Podman, PodmanException\n\nLOG = logging.getLogger(__name__)\n\n\nENTRY_POINT_CMD = \"/tmp/avocado-runner\"\n\n\nclass PodmanSpawnerInit(Init):\n\n description = 'Podman (container) based spawner initialization'\n\n def initialize(self):\n section = 'spawner.podman'\n\n help_msg = 'Path to the podman binary'\n settings.register_option(\n section=section,\n key='bin',\n help_msg=help_msg,\n default='/usr/bin/podman')\n\n this_distro = distro.detect()\n if this_distro != distro.UNKNOWN_DISTRO:\n default_distro = '{0}:{1}'.format(this_distro.name,\n this_distro.version)\n else:\n default_distro = 'fedora:latest'\n help_msg = ('Image name to use when creating the container. '\n 'The first default choice is a container image '\n 'matching the current OS. If unable to detect, '\n 'default becomes the latest Fedora release. Default '\n 'on this system: {0}'.format(default_distro))\n settings.register_option(\n section=section,\n key='image',\n help_msg=help_msg,\n default=default_distro)\n\n help_msg = ('Avocado egg path to be used during initial bootstrap '\n 'of avocado inside the isolated environment. 
By default, '\n 'Avocado will try to download (or get from cache) an '\n 'egg from its repository.')\n\n settings.register_option(section=section,\n key='avocado_spawner_egg',\n help_msg=help_msg,\n default=None)\n\n\nclass PodmanCLI(CLI):\n\n name = 'podman'\n description = 'podman spawner command line options for \"run\"'\n\n def configure(self, parser):\n super().configure(parser)\n parser = parser.subcommands.choices.get('run', None)\n if parser is None:\n return\n\n parser = parser.add_argument_group('podman spawner specific options')\n settings.add_argparser_to_option(namespace='spawner.podman.bin',\n parser=parser,\n long_arg='--spawner-podman-bin',\n metavar='PODMAN_BIN')\n\n settings.add_argparser_to_option(namespace='spawner.podman.image',\n parser=parser,\n long_arg='--spawner-podman-image',\n metavar='CONTAINER_IMAGE')\n\n namespace = 'spawner.podman.avocado_spawner_egg'\n long_arg = '--spawner-podman-avocado-egg'\n settings.add_argparser_to_option(namespace=namespace,\n parser=parser,\n long_arg=long_arg,\n metavar='AVOCADO_EGG')\n\n def run(self, config):\n pass\n\n\nclass PodmanSpawner(DeploymentSpawner, SpawnerMixin):\n\n description = 'Podman (container) based spawner'\n METHODS = [SpawnMethod.STANDALONE_EXECUTABLE]\n\n _PYTHON_VERSIONS_CACHE = {}\n\n def is_task_alive(self, runtime_task):\n if runtime_task.spawner_handle is None:\n return False\n podman_bin = self.config.get('spawner.podman.bin')\n cmd = [podman_bin, \"ps\", \"--all\", \"--format={{.State}}\",\n \"--filter=id=%s\" % runtime_task.spawner_handle]\n process = subprocess.Popen(cmd,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n out, _ = process.communicate()\n # FIXME: check how podman 2.x is reporting valid \"OK\" states\n return out.startswith(b'Up ')\n\n def _fetch_asset(self, url):\n cachedirs = self.config.get('datadir.paths.cache_dirs')\n asset = Asset(url, cache_dirs=cachedirs)\n return asset.fetch()\n\n def get_eggs_paths(self, py_major, py_minor):\n \"\"\"Return the basic eggs needed to bootstrap Avocado.\n\n This will return a tuple with the current location and where this\n should be deployed.\n \"\"\"\n result = []\n # Setuptools\n # For now let's pin to setuptools 59.2.\n # TODO: Automatically get latest setuptools version.\n eggs = [f\"https://github.com/avocado-framework/setuptools/releases/download/v59.2.0/setuptools-59.2.0-py{py_major}.{py_minor}.egg\"]\n local_egg = self.config.get('spawner.podman.avocado_spawner_egg')\n if local_egg:\n eggs.append(local_egg)\n else:\n remote_egg = f\"https://github.com/avocado-framework/avocado/releases/download/{VERSION}/avocado_framework-{VERSION}-py{py_major}.{py_minor}.egg\"\n eggs.append(remote_egg)\n\n for url in eggs:\n path = self._fetch_asset(url)\n to = os.path.join('/tmp/', os.path.basename(path))\n result.append((path, to))\n return result\n\n @property\n async def python_version(self):\n image = self.config.get('spawner.podman.image')\n if image not in self._PYTHON_VERSIONS_CACHE:\n if not self.podman:\n msg = \"Cannot get Python version: self.podman not defined.\"\n LOG.debug(msg)\n return None, None, None\n result = await self.podman.get_python_version(image)\n self._PYTHON_VERSIONS_CACHE[image] = result\n return self._PYTHON_VERSIONS_CACHE[image]\n\n async def deploy_artifacts(self):\n pass\n\n async def deploy_avocado(self, where):\n # Deploy all the eggs to container inside /tmp/\n major, minor, _ = await self.python_version\n eggs = self.get_eggs_paths(major, minor)\n\n for egg, to in eggs:\n await 
self.podman.copy_to_container(where, egg, to)\n\n async def _create_container_for_task(self, runtime_task, env_args,\n test_output=None):\n mount_status_server_socket = False\n mounted_status_server_socket = '/tmp/.status_server.sock'\n status_server_uri = runtime_task.task.status_services[0].uri\n if ':' not in status_server_uri:\n # a unix domain socket is being used\n mount_status_server_socket = True\n runtime_task.task.status_services[0].uri = mounted_status_server_socket\n\n _, _, python_binary = await self.python_version\n entry_point_args = [python_binary,\n '-m',\n 'avocado.core.nrunner',\n 'task-run']\n\n task = runtime_task.task\n entry_point_args.extend(task.get_command_args())\n entry_point = json.dumps(entry_point_args)\n entry_point_arg = \"--entrypoint=\" + entry_point\n\n if mount_status_server_socket:\n status_server_opts = (\n \"--privileged\",\n \"-v\", \"%s:%s\" % (status_server_uri,\n mounted_status_server_socket)\n )\n else:\n status_server_opts = (\"--net=host\", )\n\n output_opts = ()\n if test_output:\n podman_output = runtime_task.task.runnable.output_dir\n output_opts = (\"-v\", \"%s:%s\" % (test_output,\n os.path.expanduser(podman_output)))\n\n image = self.config.get('spawner.podman.image')\n\n envs = [f\"-e={k}={v}\" for k, v in env_args.items()]\n try:\n # pylint: disable=W0201\n _, stdout, _ = await self.podman.execute(\"create\",\n *status_server_opts,\n *output_opts,\n entry_point_arg,\n *envs,\n image)\n except PodmanException as ex:\n msg = f\"Could not create podman container: {ex}\"\n runtime_task.status = msg\n return False\n\n return stdout.decode().strip()\n\n async def spawn_task(self, runtime_task):\n self.create_task_output_dir(runtime_task)\n podman_bin = self.config.get('spawner.podman.bin')\n try:\n # pylint: disable=W0201\n self.podman = Podman(podman_bin)\n except PodmanException as ex:\n runtime_task.status = str(ex)\n return False\n\n major, minor, _ = await self.python_version\n # Return only the \"to\" location\n eggs = self.get_eggs_paths(major, minor)\n destination_eggs = \":\".join(map(lambda egg: str(egg[1]), eggs))\n env_args = {'PYTHONPATH': destination_eggs}\n output_dir_path = self.task_output_dir(runtime_task)\n container_id = await self._create_container_for_task(runtime_task,\n env_args,\n output_dir_path)\n\n runtime_task.spawner_handle = container_id\n\n await self.deploy_avocado(container_id)\n\n try:\n # pylint: disable=W0201\n returncode, _, _ = await self.podman.start(container_id)\n except PodmanException as ex:\n msg = f\"Could not start container: {ex}\"\n runtime_task.status = msg\n LOG.error(msg)\n return False\n\n return returncode == 0\n\n def create_task_output_dir(self, runtime_task):\n output_dir_path = self.task_output_dir(runtime_task)\n output_podman_path = '~/avocado/job-results/spawner/task'\n\n os.makedirs(output_dir_path, exist_ok=True)\n runtime_task.task.setup_output_dir(output_podman_path)\n\n async def wait_task(self, runtime_task):\n while True:\n if not self.is_task_alive(runtime_task):\n return\n await asyncio.sleep(0.1)\n\n @staticmethod\n async def check_task_requirements(runtime_task):\n \"\"\"Check the runtime task requirements needed to be able to run\"\"\"\n # right now, limit the check to the runner availability.\n if runtime_task.task.runnable.pick_runner_command() is None:\n return False\n return True\n", "path": "avocado/plugins/spawners/podman.py"}]}
| 3,181 | 585 |
gh_patches_debug_19661
|
rasdani/github-patches
|
git_diff
|
gammapy__gammapy-5044
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
stat_null is not available on FluxPoints objects created with gammapy
**Gammapy version**
present dev, 1.1
**Bug description**
For `FluxPoints` created with gammapy estimators, the quantity `stat_null` is not available.
**To Reproduce**
1. Run the `spectral analysis notebook`. Then, accessing `flux_points.stat_null` gives an error
```
File ~/anaconda3/envs/gammapy-1.1/lib/python3.9/site-packages/gammapy/estimators/map/core.py:493, in FluxMaps.stat_null(self)
490 @property
491 def stat_null(self):
492 """Fit statistic value for the null hypothesis"""
--> 493 self._check_quantity("stat_null")
494 return self._data["stat_null"]
File ~/anaconda3/envs/gammapy-1.1/lib/python3.9/site-packages/gammapy/estimators/map/core.py:222, in FluxMaps._check_quantity(self, quantity)
220 def _check_quantity(self, quantity):
221 if quantity not in self.available_quantities:
--> 222 raise AttributeError(
223 f"Quantity '{quantity}' is not defined on current flux estimate."
224 )
AttributeError: Quantity 'stat_null' is not defined on current flux estimate.
```
Similarly for the PKS2155 lightcurve in gammapy-data.
**Other information**
This blocks the TS computation for #5007
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gammapy/estimators/parameter.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import logging
3 import numpy as np
4 from gammapy.datasets import Datasets
5 from gammapy.datasets.actors import DatasetsActor
6 from gammapy.modeling import Fit
7 from .core import Estimator
8
9 log = logging.getLogger(__name__)
10
11
12 class ParameterEstimator(Estimator):
13 """Model parameter estimator.
14
15 Estimates a model parameter for a group of datasets. Compute best fit value,
16 symmetric and delta(TS) for a given null value. Additionally asymmetric errors
17 as well as parameter upper limit and fit statistic profile can be estimated.
18
19 Parameters
20 ----------
21 n_sigma : int
22 Sigma to use for asymmetric error computation. Default is 1.
23 n_sigma_ul : int
24 Sigma to use for upper limit computation. Default is 2.
25 null_value : float
26 Which null value to use for the parameter.
27 selection_optional : list of str, optional
28 Which additional quantities to estimate. Available options are:
29
30 * "all": all the optional steps are executed.
31 * "errn-errp": estimate asymmetric errors on parameter best fit value.
32 * "ul": estimate upper limits.
33 * "scan": estimate fit statistic profiles.
34
35 Default is None so the optional steps are not executed.
36 fit : `Fit`
37 Fit instance specifying the backend and fit options.
38 reoptimize : bool
39 Re-optimize other free model parameters. Default is True.
40 """
41
42 tag = "ParameterEstimator"
43 _available_selection_optional = ["errn-errp", "ul", "scan"]
44
45 def __init__(
46 self,
47 n_sigma=1,
48 n_sigma_ul=2,
49 null_value=1e-150,
50 selection_optional=None,
51 fit=None,
52 reoptimize=True,
53 ):
54 self.n_sigma = n_sigma
55 self.n_sigma_ul = n_sigma_ul
56 self.null_value = null_value
57 self.selection_optional = selection_optional
58
59 if fit is None:
60 fit = Fit()
61
62 self.fit = fit
63 self.reoptimize = reoptimize
64
65 def estimate_best_fit(self, datasets, parameter):
66 """Estimate parameter asymmetric errors.
67
68 Parameters
69 ----------
70 datasets : `~gammapy.datasets.Datasets`
71 Datasets.
72 parameter : `Parameter`
73 For which parameter to get the value.
74
75 Returns
76 -------
77 result : dict
78 Dictionary with the various parameter estimation values. Entries are:
79
80 * parameter.name: best fit parameter value.
81 * "stat": best fit total stat.
82 * "success": boolean flag for fit success.
83 * parameter.name_err: covariance-based error estimate on parameter value.
84 """
85 value, total_stat, success, error = np.nan, 0.0, False, np.nan
86
87 if np.any(datasets.contributes_to_stat):
88 result = self.fit.run(datasets=datasets)
89 value, error = parameter.value, parameter.error
90 total_stat = result.optimize_result.total_stat
91 success = result.success
92
93 return {
94 f"{parameter.name}": value,
95 "stat": total_stat,
96 "success": success,
97 f"{parameter.name}_err": error * self.n_sigma,
98 }
99
100 def estimate_ts(self, datasets, parameter):
101 """Estimate parameter ts.
102
103 Parameters
104 ----------
105 datasets : `~gammapy.datasets.Datasets`
106 Datasets.
107 parameter : `Parameter`
108 For which parameter to get the value.
109
110 Returns
111 -------
112 result : dict
113 Dictionary with the test statistic of the best fit value compared to the null hypothesis. Entries are:
114
115 * "ts" : fit statistic difference with null hypothesis.
116 * "npred" : predicted number of counts per dataset.
117 """
118 npred = self.estimate_npred(datasets=datasets)
119
120 if not np.any(datasets.contributes_to_stat):
121 stat = np.nan
122 npred["npred"][...] = np.nan
123 else:
124 stat = datasets.stat_sum()
125
126 with datasets.parameters.restore_status():
127 # compute ts value
128 parameter.value = self.null_value
129
130 if self.reoptimize:
131 parameter.frozen = True
132 _ = self.fit.optimize(datasets=datasets)
133
134 ts = datasets.stat_sum() - stat
135
136 return {
137 "ts": ts,
138 "npred": npred["npred"],
139 }
140
141 def estimate_errn_errp(self, datasets, parameter):
142 """Estimate parameter asymmetric errors.
143
144 Parameters
145 ----------
146 datasets : `~gammapy.datasets.Datasets`
147 Datasets.
148 parameter : `Parameter`
149 For which parameter to get the value.
150
151 Returns
152 -------
153 result : dict
154 Dictionary with the parameter asymmetric errors. Entries are:
155
156 * {parameter.name}_errp : positive error on parameter value.
157 * {parameter.name}_errn : negative error on parameter value.
158 """
159 if not np.any(datasets.contributes_to_stat):
160 return {
161 f"{parameter.name}_errp": np.nan,
162 f"{parameter.name}_errn": np.nan,
163 }
164
165 self.fit.optimize(datasets=datasets)
166
167 res = self.fit.confidence(
168 datasets=datasets,
169 parameter=parameter,
170 sigma=self.n_sigma,
171 reoptimize=self.reoptimize,
172 )
173
174 return {
175 f"{parameter.name}_errp": res["errp"],
176 f"{parameter.name}_errn": res["errn"],
177 }
178
179 def estimate_scan(self, datasets, parameter):
180 """Estimate parameter statistic scan.
181
182 Parameters
183 ----------
184 datasets : `~gammapy.datasets.Datasets`
185 The datasets used to estimate the model parameter.
186 parameter : `~gammapy.modeling.Parameter`
187 For which parameter to get the value.
188
189 Returns
190 -------
191 result : dict
192 Dictionary with the parameter fit scan values. Entries are:
193
194 * parameter.name_scan : parameter values scan.
195 * "stat_scan" : fit statistic values scan.
196 """
197 scan_values = parameter.scan_values
198
199 if not np.any(datasets.contributes_to_stat):
200 return {
201 f"{parameter.name}_scan": scan_values,
202 "stat_scan": scan_values * np.nan,
203 }
204
205 self.fit.optimize(datasets=datasets)
206
207 profile = self.fit.stat_profile(
208 datasets=datasets, parameter=parameter, reoptimize=self.reoptimize
209 )
210
211 return {
212 f"{parameter.name}_scan": scan_values,
213 "stat_scan": profile["stat_scan"],
214 }
215
216 def estimate_ul(self, datasets, parameter):
217 """Estimate parameter ul.
218
219 Parameters
220 ----------
221 datasets : `~gammapy.datasets.Datasets`
222 The datasets used to estimate the model parameter.
223 parameter : `~gammapy.modeling.Parameter`
224 For which parameter to get the value.
225
226 Returns
227 -------
228 result : dict
229 Dictionary with the parameter upper limits. Entries are:
230
231 * parameter.name_ul : upper limit on parameter value.
232 """
233 if not np.any(datasets.contributes_to_stat):
234 return {f"{parameter.name}_ul": np.nan}
235
236 self.fit.optimize(datasets=datasets)
237
238 res = self.fit.confidence(
239 datasets=datasets,
240 parameter=parameter,
241 sigma=self.n_sigma_ul,
242 reoptimize=self.reoptimize,
243 )
244 return {f"{parameter.name}_ul": res["errp"] + parameter.value}
245
246 @staticmethod
247 def estimate_counts(datasets):
248 """Estimate counts for the flux point.
249
250 Parameters
251 ----------
252 datasets : Datasets
253 Datasets.
254
255 Returns
256 -------
257 result : dict
258 Dictionary with an array with one entry per dataset with the sum of the
259 masked counts.
260 """
261 counts = []
262
263 for dataset in datasets:
264 mask = dataset.mask
265 counts.append(dataset.counts.data[mask].sum())
266
267 return {"counts": np.array(counts, dtype=int), "datasets": datasets.names}
268
269 @staticmethod
270 def estimate_npred(datasets):
271 """Estimate npred for the flux point.
272
273 Parameters
274 ----------
275 datasets : `~gammapy.datasets.Datasets`
276 Datasets.
277
278 Returns
279 -------
280 result : dict
281 Dictionary with an array with one entry per dataset with the sum of the
282 masked npred.
283 """
284 npred = []
285
286 for dataset in datasets:
287 mask = dataset.mask
288 npred.append(dataset.npred().data[mask].sum())
289
290 return {"npred": np.array(npred), "datasets": datasets.names}
291
292 def run(self, datasets, parameter):
293 """Run the parameter estimator.
294
295 Parameters
296 ----------
297 datasets : `~gammapy.datasets.Datasets`
298 The datasets used to estimate the model parameter.
299 parameter : `str` or `~gammapy.modeling.Parameter`
300 For which parameter to run the estimator.
301
302 Returns
303 -------
304 result : dict
305 Dictionary with the various parameter estimation values.
306 """
307 if not isinstance(datasets, DatasetsActor):
308 datasets = Datasets(datasets)
309 parameter = datasets.parameters[parameter]
310
311 with datasets.parameters.restore_status():
312
313 if not self.reoptimize:
314 datasets.parameters.freeze_all()
315 parameter.frozen = False
316
317 result = self.estimate_best_fit(datasets, parameter)
318 result.update(self.estimate_ts(datasets, parameter))
319
320 if "errn-errp" in self.selection_optional:
321 result.update(self.estimate_errn_errp(datasets, parameter))
322
323 if "ul" in self.selection_optional:
324 result.update(self.estimate_ul(datasets, parameter))
325
326 if "scan" in self.selection_optional:
327 result.update(self.estimate_scan(datasets, parameter))
328
329 result.update(self.estimate_counts(datasets))
330 return result
331
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gammapy/estimators/parameter.py b/gammapy/estimators/parameter.py
--- a/gammapy/estimators/parameter.py
+++ b/gammapy/estimators/parameter.py
@@ -114,6 +114,7 @@
* "ts" : fit statistic difference with null hypothesis.
* "npred" : predicted number of counts per dataset.
+ * "stat_null" : total stat corresponding to the null hypothesis
"""
npred = self.estimate_npred(datasets=datasets)
@@ -132,11 +133,9 @@
_ = self.fit.optimize(datasets=datasets)
ts = datasets.stat_sum() - stat
+ stat_null = datasets.stat_sum()
- return {
- "ts": ts,
- "npred": npred["npred"],
- }
+ return {"ts": ts, "npred": npred["npred"], "stat_null": stat_null}
def estimate_errn_errp(self, datasets, parameter):
"""Estimate parameter asymmetric errors.
|
{"golden_diff": "diff --git a/gammapy/estimators/parameter.py b/gammapy/estimators/parameter.py\n--- a/gammapy/estimators/parameter.py\n+++ b/gammapy/estimators/parameter.py\n@@ -114,6 +114,7 @@\n \n * \"ts\" : fit statistic difference with null hypothesis.\n * \"npred\" : predicted number of counts per dataset.\n+ * \"stat_null\" : total stat corresponding to the null hypothesis\n \"\"\"\n npred = self.estimate_npred(datasets=datasets)\n \n@@ -132,11 +133,9 @@\n _ = self.fit.optimize(datasets=datasets)\n \n ts = datasets.stat_sum() - stat\n+ stat_null = datasets.stat_sum()\n \n- return {\n- \"ts\": ts,\n- \"npred\": npred[\"npred\"],\n- }\n+ return {\"ts\": ts, \"npred\": npred[\"npred\"], \"stat_null\": stat_null}\n \n def estimate_errn_errp(self, datasets, parameter):\n \"\"\"Estimate parameter asymmetric errors.\n", "issue": "stat_null is not available on FluxPoints objects created with gammapy\n**Gammapy version**\r\npresent dev, 1.1\r\n\r\n\r\n**Bug description**\r\nFor `FluxPoints` created with gammapy estimators, the quantity `stat_null` is not available.\r\n\r\n\r\n**To Reproduce**\r\n1. Run the `spectral analysis notebook`. Then, accessing `flux_points.stat_null` gives an error\r\n```\r\nFile ~/anaconda3/envs/gammapy-1.1/lib/python3.9/site-packages/gammapy/estimators/map/core.py:493, in FluxMaps.stat_null(self)\r\n 490 @property\r\n 491 def stat_null(self):\r\n 492 \"\"\"Fit statistic value for the null hypothesis\"\"\"\r\n--> 493 self._check_quantity(\"stat_null\")\r\n 494 return self._data[\"stat_null\"]\r\n\r\nFile ~/anaconda3/envs/gammapy-1.1/lib/python3.9/site-packages/gammapy/estimators/map/core.py:222, in FluxMaps._check_quantity(self, quantity)\r\n 220 def _check_quantity(self, quantity):\r\n 221 if quantity not in self.available_quantities:\r\n--> 222 raise AttributeError(\r\n 223 f\"Quantity '{quantity}' is not defined on current flux estimate.\"\r\n 224 )\r\n\r\nAttributeError: Quantity 'stat_null' is not defined on current flux estimate.\r\n```\r\n\r\nSimilarly for the PKS2155 lightcurve in gammapy-ata\r\n\r\n\r\n**Other information**\r\nThis blocks the TS computation for #5007\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom gammapy.datasets import Datasets\nfrom gammapy.datasets.actors import DatasetsActor\nfrom gammapy.modeling import Fit\nfrom .core import Estimator\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterEstimator(Estimator):\n \"\"\"Model parameter estimator.\n\n Estimates a model parameter for a group of datasets. Compute best fit value,\n symmetric and delta(TS) for a given null value. Additionally asymmetric errors\n as well as parameter upper limit and fit statistic profile can be estimated.\n\n Parameters\n ----------\n n_sigma : int\n Sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Sigma to use for upper limit computation. Default is 2.\n null_value : float\n Which null value to use for the parameter.\n selection_optional : list of str, optional\n Which additional quantities to estimate. Available options are:\n\n * \"all\": all the optional steps are executed.\n * \"errn-errp\": estimate asymmetric errors on parameter best fit value.\n * \"ul\": estimate upper limits.\n * \"scan\": estimate fit statistic profiles.\n\n Default is None so the optional steps are not executed.\n fit : `Fit`\n Fit instance specifying the backend and fit options.\n reoptimize : bool\n Re-optimize other free model parameters. 
Default is True.\n \"\"\"\n\n tag = \"ParameterEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n n_sigma=1,\n n_sigma_ul=2,\n null_value=1e-150,\n selection_optional=None,\n fit=None,\n reoptimize=True,\n ):\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.null_value = null_value\n self.selection_optional = selection_optional\n\n if fit is None:\n fit = Fit()\n\n self.fit = fit\n self.reoptimize = reoptimize\n\n def estimate_best_fit(self, datasets, parameter):\n \"\"\"Estimate parameter asymmetric errors.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n parameter : `Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the various parameter estimation values. Entries are:\n\n * parameter.name: best fit parameter value.\n * \"stat\": best fit total stat.\n * \"success\": boolean flag for fit success.\n * parameter.name_err: covariance-based error estimate on parameter value.\n \"\"\"\n value, total_stat, success, error = np.nan, 0.0, False, np.nan\n\n if np.any(datasets.contributes_to_stat):\n result = self.fit.run(datasets=datasets)\n value, error = parameter.value, parameter.error\n total_stat = result.optimize_result.total_stat\n success = result.success\n\n return {\n f\"{parameter.name}\": value,\n \"stat\": total_stat,\n \"success\": success,\n f\"{parameter.name}_err\": error * self.n_sigma,\n }\n\n def estimate_ts(self, datasets, parameter):\n \"\"\"Estimate parameter ts.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n parameter : `Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the test statistic of the best fit value compared to the null hypothesis. Entries are:\n\n * \"ts\" : fit statistic difference with null hypothesis.\n * \"npred\" : predicted number of counts per dataset.\n \"\"\"\n npred = self.estimate_npred(datasets=datasets)\n\n if not np.any(datasets.contributes_to_stat):\n stat = np.nan\n npred[\"npred\"][...] = np.nan\n else:\n stat = datasets.stat_sum()\n\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n\n if self.reoptimize:\n parameter.frozen = True\n _ = self.fit.optimize(datasets=datasets)\n\n ts = datasets.stat_sum() - stat\n\n return {\n \"ts\": ts,\n \"npred\": npred[\"npred\"],\n }\n\n def estimate_errn_errp(self, datasets, parameter):\n \"\"\"Estimate parameter asymmetric errors.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n parameter : `Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the parameter asymmetric errors. 
Entries are:\n\n * {parameter.name}_errp : positive error on parameter value.\n * {parameter.name}_errn : negative error on parameter value.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_errp\": np.nan,\n f\"{parameter.name}_errn\": np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma,\n reoptimize=self.reoptimize,\n )\n\n return {\n f\"{parameter.name}_errp\": res[\"errp\"],\n f\"{parameter.name}_errn\": res[\"errn\"],\n }\n\n def estimate_scan(self, datasets, parameter):\n \"\"\"Estimate parameter statistic scan.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter.\n parameter : `~gammapy.modeling.Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the parameter fit scan values. Entries are:\n\n * parameter.name_scan : parameter values scan.\n * \"stat_scan\" : fit statistic values scan.\n \"\"\"\n scan_values = parameter.scan_values\n\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": scan_values * np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n profile = self.fit.stat_profile(\n datasets=datasets, parameter=parameter, reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": profile[\"stat_scan\"],\n }\n\n def estimate_ul(self, datasets, parameter):\n \"\"\"Estimate parameter ul.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter.\n parameter : `~gammapy.modeling.Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the parameter upper limits. 
Entries are:\n\n * parameter.name_ul : upper limit on parameter value.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma_ul,\n reoptimize=self.reoptimize,\n )\n return {f\"{parameter.name}_ul\": res[\"errp\"] + parameter.value}\n\n @staticmethod\n def estimate_counts(datasets):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets.\n\n Returns\n -------\n result : dict\n Dictionary with an array with one entry per dataset with the sum of the\n masked counts.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n mask = dataset.mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int), \"datasets\": datasets.names}\n\n @staticmethod\n def estimate_npred(datasets):\n \"\"\"Estimate npred for the flux point.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n\n Returns\n -------\n result : dict\n Dictionary with an array with one entry per dataset with the sum of the\n masked npred.\n \"\"\"\n npred = []\n\n for dataset in datasets:\n mask = dataset.mask\n npred.append(dataset.npred().data[mask].sum())\n\n return {\"npred\": np.array(npred), \"datasets\": datasets.names}\n\n def run(self, datasets, parameter):\n \"\"\"Run the parameter estimator.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter.\n parameter : `str` or `~gammapy.modeling.Parameter`\n For which parameter to run the estimator.\n\n Returns\n -------\n result : dict\n Dictionary with the various parameter estimation values.\n \"\"\"\n if not isinstance(datasets, DatasetsActor):\n datasets = Datasets(datasets)\n parameter = datasets.parameters[parameter]\n\n with datasets.parameters.restore_status():\n\n if not self.reoptimize:\n datasets.parameters.freeze_all()\n parameter.frozen = False\n\n result = self.estimate_best_fit(datasets, parameter)\n result.update(self.estimate_ts(datasets, parameter))\n\n if \"errn-errp\" in self.selection_optional:\n result.update(self.estimate_errn_errp(datasets, parameter))\n\n if \"ul\" in self.selection_optional:\n result.update(self.estimate_ul(datasets, parameter))\n\n if \"scan\" in self.selection_optional:\n result.update(self.estimate_scan(datasets, parameter))\n\n result.update(self.estimate_counts(datasets))\n return result\n", "path": "gammapy/estimators/parameter.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom gammapy.datasets import Datasets\nfrom gammapy.datasets.actors import DatasetsActor\nfrom gammapy.modeling import Fit\nfrom .core import Estimator\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterEstimator(Estimator):\n \"\"\"Model parameter estimator.\n\n Estimates a model parameter for a group of datasets. Compute best fit value,\n symmetric and delta(TS) for a given null value. Additionally asymmetric errors\n as well as parameter upper limit and fit statistic profile can be estimated.\n\n Parameters\n ----------\n n_sigma : int\n Sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Sigma to use for upper limit computation. 
Default is 2.\n null_value : float\n Which null value to use for the parameter.\n selection_optional : list of str, optional\n Which additional quantities to estimate. Available options are:\n\n * \"all\": all the optional steps are executed.\n * \"errn-errp\": estimate asymmetric errors on parameter best fit value.\n * \"ul\": estimate upper limits.\n * \"scan\": estimate fit statistic profiles.\n\n Default is None so the optional steps are not executed.\n fit : `Fit`\n Fit instance specifying the backend and fit options.\n reoptimize : bool\n Re-optimize other free model parameters. Default is True.\n \"\"\"\n\n tag = \"ParameterEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n n_sigma=1,\n n_sigma_ul=2,\n null_value=1e-150,\n selection_optional=None,\n fit=None,\n reoptimize=True,\n ):\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.null_value = null_value\n self.selection_optional = selection_optional\n\n if fit is None:\n fit = Fit()\n\n self.fit = fit\n self.reoptimize = reoptimize\n\n def estimate_best_fit(self, datasets, parameter):\n \"\"\"Estimate parameter asymmetric errors.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n parameter : `Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the various parameter estimation values. Entries are:\n\n * parameter.name: best fit parameter value.\n * \"stat\": best fit total stat.\n * \"success\": boolean flag for fit success.\n * parameter.name_err: covariance-based error estimate on parameter value.\n \"\"\"\n value, total_stat, success, error = np.nan, 0.0, False, np.nan\n\n if np.any(datasets.contributes_to_stat):\n result = self.fit.run(datasets=datasets)\n value, error = parameter.value, parameter.error\n total_stat = result.optimize_result.total_stat\n success = result.success\n\n return {\n f\"{parameter.name}\": value,\n \"stat\": total_stat,\n \"success\": success,\n f\"{parameter.name}_err\": error * self.n_sigma,\n }\n\n def estimate_ts(self, datasets, parameter):\n \"\"\"Estimate parameter ts.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n parameter : `Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the test statistic of the best fit value compared to the null hypothesis. Entries are:\n\n * \"ts\" : fit statistic difference with null hypothesis.\n * \"npred\" : predicted number of counts per dataset.\n * \"stat_null\" : total stat corresponding to the null hypothesis\n \"\"\"\n npred = self.estimate_npred(datasets=datasets)\n\n if not np.any(datasets.contributes_to_stat):\n stat = np.nan\n npred[\"npred\"][...] = np.nan\n else:\n stat = datasets.stat_sum()\n\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n\n if self.reoptimize:\n parameter.frozen = True\n _ = self.fit.optimize(datasets=datasets)\n\n ts = datasets.stat_sum() - stat\n stat_null = datasets.stat_sum()\n\n return {\"ts\": ts, \"npred\": npred[\"npred\"], \"stat_null\": stat_null}\n\n def estimate_errn_errp(self, datasets, parameter):\n \"\"\"Estimate parameter asymmetric errors.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n parameter : `Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the parameter asymmetric errors. 
Entries are:\n\n * {parameter.name}_errp : positive error on parameter value.\n * {parameter.name}_errn : negative error on parameter value.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_errp\": np.nan,\n f\"{parameter.name}_errn\": np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma,\n reoptimize=self.reoptimize,\n )\n\n return {\n f\"{parameter.name}_errp\": res[\"errp\"],\n f\"{parameter.name}_errn\": res[\"errn\"],\n }\n\n def estimate_scan(self, datasets, parameter):\n \"\"\"Estimate parameter statistic scan.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter.\n parameter : `~gammapy.modeling.Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the parameter fit scan values. Entries are:\n\n * parameter.name_scan : parameter values scan.\n * \"stat_scan\" : fit statistic values scan.\n \"\"\"\n scan_values = parameter.scan_values\n\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": scan_values * np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n profile = self.fit.stat_profile(\n datasets=datasets, parameter=parameter, reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": profile[\"stat_scan\"],\n }\n\n def estimate_ul(self, datasets, parameter):\n \"\"\"Estimate parameter ul.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter.\n parameter : `~gammapy.modeling.Parameter`\n For which parameter to get the value.\n\n Returns\n -------\n result : dict\n Dictionary with the parameter upper limits. 
Entries are:\n\n * parameter.name_ul : upper limit on parameter value.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma_ul,\n reoptimize=self.reoptimize,\n )\n return {f\"{parameter.name}_ul\": res[\"errp\"] + parameter.value}\n\n @staticmethod\n def estimate_counts(datasets):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets.\n\n Returns\n -------\n result : dict\n Dictionary with an array with one entry per dataset with the sum of the\n masked counts.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n mask = dataset.mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int), \"datasets\": datasets.names}\n\n @staticmethod\n def estimate_npred(datasets):\n \"\"\"Estimate npred for the flux point.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets.\n\n Returns\n -------\n result : dict\n Dictionary with an array with one entry per dataset with the sum of the\n masked npred.\n \"\"\"\n npred = []\n\n for dataset in datasets:\n mask = dataset.mask\n npred.append(dataset.npred().data[mask].sum())\n\n return {\"npred\": np.array(npred), \"datasets\": datasets.names}\n\n def run(self, datasets, parameter):\n \"\"\"Run the parameter estimator.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter.\n parameter : `str` or `~gammapy.modeling.Parameter`\n For which parameter to run the estimator.\n\n Returns\n -------\n result : dict\n Dictionary with the various parameter estimation values.\n \"\"\"\n if not isinstance(datasets, DatasetsActor):\n datasets = Datasets(datasets)\n parameter = datasets.parameters[parameter]\n\n with datasets.parameters.restore_status():\n\n if not self.reoptimize:\n datasets.parameters.freeze_all()\n parameter.frozen = False\n\n result = self.estimate_best_fit(datasets, parameter)\n result.update(self.estimate_ts(datasets, parameter))\n\n if \"errn-errp\" in self.selection_optional:\n result.update(self.estimate_errn_errp(datasets, parameter))\n\n if \"ul\" in self.selection_optional:\n result.update(self.estimate_ul(datasets, parameter))\n\n if \"scan\" in self.selection_optional:\n result.update(self.estimate_scan(datasets, parameter))\n\n result.update(self.estimate_counts(datasets))\n return result\n", "path": "gammapy/estimators/parameter.py"}]}
| 3,624 | 247 |
gh_patches_debug_9250
|
rasdani/github-patches
|
git_diff
|
opensearch-project__opensearch-build-1852
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RPM M1] Add a new block to call the generation code for RPM
Tasks | Estimate | Status | Notes | Dependencies
-- | -- | -- | -- | --
The generation code should pull the artifacts from the build workflow to a temporary location | 1 | Complete | | Build workflow must provide usable artifacts
The code will call existing install function to install plugins on min artifacts | 1 | Complete | |
After installation, the code will execute a tool or utility to wrap all the content into a RPM package | 5 | Complete | Require writing a script to utilize FPM to start with and later implement in pure python code. <br><br>20220204: We might change to rpmbuild directly without using FPM. See comments. | FPM usages
The code will also add dependencies to the RPM package so that things like JDK and additional libs for plugins can be installed and pulled separately | 5 | Complete | Need to study on RPM dependency setups | RPM Build Dependencies and the locations of each dependent artifact
The code will move the RPM package from the temp location to dist folder | 2 | Complete | |
--- END ISSUE ---
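For orientation, the following is a minimal sketch of the extract–install–repackage flow the task table describes. It assumes `rpm2cpio`, `cpio`, and `rpmbuild` are available on the build host; the spec file, version/architecture values, and the plugin-install hook are illustrative placeholders rather than the project's actual API.

```python
# Sketch only: mirrors the flow in the task table above (extract the min RPM,
# install plugins onto it, then wrap the result back into an RPM package).
import os
import subprocess
import tempfile


def repackage_rpm(rpm_path: str, spec_file: str, version: str, arch: str) -> None:
    with tempfile.TemporaryDirectory() as workdir:
        cpio_path = os.path.join(workdir, "package.cpio")

        # 1. Convert the RPM to a cpio archive and extract its content.
        with open(cpio_path, "wb") as fp:
            subprocess.check_call(["rpm2cpio", rpm_path], stdout=fp, cwd=workdir)
        with open(cpio_path, "rb") as fp:
            subprocess.check_call(["cpio", "-imdv"], stdin=fp, cwd=workdir)

        # 2. Install plugins onto the extracted min artifacts (placeholder hook).
        # install_plugins(workdir)

        # 3. Wrap everything back into an RPM package via rpmbuild.
        subprocess.check_call(
            [
                "rpmbuild",
                "-bb",
                "--define", f"_topdir {workdir}",
                "--define", f"_version {version}",
                "--define", f"_architecture {arch}",
                spec_file,
            ],
            cwd=workdir,
        )
```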
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/assemble_workflow/bundle_rpm.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import logging
8 import os
9 import shutil
10 import subprocess
11
12 from manifests.build_manifest import BuildManifest
13 from system.os import rpm_architecture
14
15
16 class BundleRpm:
17
18 def __init__(self, filename: str, package_path: str, min_path: str) -> None:
19 self.filename = filename
20 self.package_path = package_path
21 self.min_path = min_path
22
23 def extract(self, dest: str) -> None:
24 cpio_basename = os.path.splitext(os.path.basename(self.package_path))[0]
25 cpio_path = os.path.join(dest, f"{cpio_basename}.cpio")
26 min_source_path = os.path.join(dest, 'usr', 'share', self.filename)
27 min_dest_path = os.path.join(dest, self.min_path)
28 min_config_path = os.path.join(dest, 'etc', self.filename)
29 min_bin_env_path = os.path.join(min_dest_path, 'bin', f"{self.filename}-env")
30
31 # Convert rpm to cpio so we can extract the content
32 logging.info(f"Convert rpm to cpio for extraction: {self.package_path} to {cpio_path}")
33 with open(cpio_path, 'wb') as fp:
34 subprocess.check_call(
35 [
36 'rpm2cpio',
37 self.package_path,
38 ],
39 stdout=fp,
40 cwd=dest,
41 )
42
43 # Extract cpio archive based on the rpm package
44 logging.info(f"Extract cpio {cpio_path} content to {dest}")
45 with open(cpio_path, 'rb') as fp:
46 subprocess.check_call(
47 [
48 'cpio',
49 '-imdv',
50 ],
51 stdin=fp,
52 stdout=subprocess.DEVNULL,
53 stderr=subprocess.STDOUT,
54 cwd=dest,
55 )
56
57 # Move core folder destination so plugin install can proceed
58 logging.info(f"Move {min_source_path} to {min_dest_path} for plugin installation")
59 shutil.move(min_source_path, min_dest_path)
60
61 # Multiple modifications and env vars setups before install plugins
62 # As bin/opensearch-env is different between archive and package
63 # https://github.com/opensearch-project/OpenSearch/issues/2092
64 os.environ[f"{self.filename.upper()}_PATH_CONF"] = min_config_path
65
66 if os.path.exists(min_bin_env_path):
67 # Backup original file
68 shutil.copy2(min_bin_env_path, f"{min_bin_env_path}.backup")
69 # Prevent sourcing as file is only in place after rpm installation
70 # So that min can install plugin zips
71 # Have to decode then encode back to ascii due to mypy complains TextIO not equals to BinaryIO
72 with open(min_bin_env_path, 'rb') as fp:
73 min_bin_env_lines = fp.read().decode('ascii')
74
75 with open(min_bin_env_path, 'wb') as fp:
76 fp.write(min_bin_env_lines.replace('source', '#source').encode('ascii'))
77
78 def build(self, name: str, dest: str, archive_path: str, build_cls: BuildManifest.Build) -> None:
79 # extract dest and build dest are not the same, this is restoring the extract dest
80 # mainly due to rpm requires several different setups compares to tarball and zip
81 ext_dest = os.path.dirname(archive_path)
82 min_source_path = os.path.join(ext_dest, 'usr', 'share', self.filename)
83 min_dest_path = os.path.join(ext_dest, self.min_path)
84 min_bin_env_path = os.path.join(min_dest_path, 'bin', f"{self.filename}-env")
85 bundle_artifact_path: str = None
86
87 # Remove env var
88 logging.info('Organize folder structure before generating rpm')
89 os.environ.pop('OPENSEARCH_PATH_CONF', None)
90
91 # Restore config file and core folder to original location
92 shutil.move(f"{min_bin_env_path}.backup", min_bin_env_path)
93 shutil.move(min_dest_path, min_source_path)
94
95 # Run bundle rpmbuild
96 bundle_cmd = " ".join(
97 [
98 'rpmbuild',
99 '-bb',
100 f"--define '_topdir {ext_dest}'",
101 f"--define '_version {build_cls.version}'",
102 f"--define '_architecture {rpm_architecture(build_cls.architecture)}'",
103 f"{self.filename}.rpm.spec",
104 ]
105 )
106
107 logging.info(f"Execute {bundle_cmd} in {ext_dest}")
108 subprocess.check_call(bundle_cmd, cwd=ext_dest, shell=True)
109
110 # Move artifact to repo root before being published to {dest}
111 for dirpath, dirnames, filenames in os.walk(os.path.join(ext_dest, 'RPMS')):
112 for filename in [file for file in filenames if file.endswith('.rpm')]:
113 bundle_artifact_path = os.path.join(dirpath, filename)
114 break
115
116 shutil.move(bundle_artifact_path, name)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/assemble_workflow/bundle_rpm.py b/src/assemble_workflow/bundle_rpm.py
--- a/src/assemble_workflow/bundle_rpm.py
+++ b/src/assemble_workflow/bundle_rpm.py
@@ -89,7 +89,10 @@
         os.environ.pop('OPENSEARCH_PATH_CONF', None)
 
         # Restore config file and core folder to original location
-        shutil.move(f"{min_bin_env_path}.backup", min_bin_env_path)
+        if os.path.exists(f"{min_bin_env_path}.backup"):
+            logging.info(f"Restore {min_bin_env_path}.backup to {min_bin_env_path}")
+            shutil.move(f"{min_bin_env_path}.backup", min_bin_env_path)
+
         shutil.move(min_dest_path, min_source_path)
 
         # Run bundle rpmbuild
|
{"golden_diff": "diff --git a/src/assemble_workflow/bundle_rpm.py b/src/assemble_workflow/bundle_rpm.py\n--- a/src/assemble_workflow/bundle_rpm.py\n+++ b/src/assemble_workflow/bundle_rpm.py\n@@ -89,7 +89,10 @@\n os.environ.pop('OPENSEARCH_PATH_CONF', None)\n \n # Restore config file and core folder to original location\n- shutil.move(f\"{min_bin_env_path}.backup\", min_bin_env_path)\n+ if os.path.exists(f\"{min_bin_env_path}.backup\"):\n+ logging.info(f\"Restore {min_bin_env_path}.backup to {min_bin_env_path}\")\n+ shutil.move(f\"{min_bin_env_path}.backup\", min_bin_env_path)\n+\n shutil.move(min_dest_path, min_source_path)\n \n # Run bundle rpmbuild\n", "issue": "[RPM M1] Add a new block to call the generation code for RPM\nTasks | Estimate | Status | Notes | Dependencies\r\n-- | -- | -- | -- | --\r\nThe generation code should pull the artifacts from the build workflow to a temporary location | 1 | Complete | \u00a0 | Build workflow must provide usable artifacts\r\nThe code will call existing install function to install plugins on min artifacts | 1 | Complete | \u00a0 | \u00a0\r\nAfter installation, the code will execute a tool or utility to wrap all the content into a RPM package | 5 | Complete | Require writing a script to utilize FPM to start with and later implement in pure python code. <br><br>20220204: We might change to rpmbuild directly without using FPM. See comments. | FPM usages\r\nThe code will also add dependencies to the RPM package so that things like JDK and additional libs for plugins can be installed and pulled separately | 5 | Complete | Need to study on RPM dependency setups | RPM Build Dependencies and the locations of each dependent artifact\r\nThe code will move the RPM package from the temp location to dist folder | 2 | Complete | \u00a0 | \u00a0\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport logging\nimport os\nimport shutil\nimport subprocess\n\nfrom manifests.build_manifest import BuildManifest\nfrom system.os import rpm_architecture\n\n\nclass BundleRpm:\n\n def __init__(self, filename: str, package_path: str, min_path: str) -> None:\n self.filename = filename\n self.package_path = package_path\n self.min_path = min_path\n\n def extract(self, dest: str) -> None:\n cpio_basename = os.path.splitext(os.path.basename(self.package_path))[0]\n cpio_path = os.path.join(dest, f\"{cpio_basename}.cpio\")\n min_source_path = os.path.join(dest, 'usr', 'share', self.filename)\n min_dest_path = os.path.join(dest, self.min_path)\n min_config_path = os.path.join(dest, 'etc', self.filename)\n min_bin_env_path = os.path.join(min_dest_path, 'bin', f\"{self.filename}-env\")\n\n # Convert rpm to cpio so we can extract the content\n logging.info(f\"Convert rpm to cpio for extraction: {self.package_path} to {cpio_path}\")\n with open(cpio_path, 'wb') as fp:\n subprocess.check_call(\n [\n 'rpm2cpio',\n self.package_path,\n ],\n stdout=fp,\n cwd=dest,\n )\n\n # Extract cpio archive based on the rpm package\n logging.info(f\"Extract cpio {cpio_path} content to {dest}\")\n with open(cpio_path, 'rb') as fp:\n subprocess.check_call(\n [\n 'cpio',\n '-imdv',\n ],\n stdin=fp,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n cwd=dest,\n )\n\n # Move core folder destination so plugin install can proceed\n logging.info(f\"Move {min_source_path} to {min_dest_path} for plugin 
installation\")\n shutil.move(min_source_path, min_dest_path)\n\n # Multiple modifications and env vars setups before install plugins\n # As bin/opensearch-env is different between archive and package\n # https://github.com/opensearch-project/OpenSearch/issues/2092\n os.environ[f\"{self.filename.upper()}_PATH_CONF\"] = min_config_path\n\n if os.path.exists(min_bin_env_path):\n # Backup original file\n shutil.copy2(min_bin_env_path, f\"{min_bin_env_path}.backup\")\n # Prevent sourcing as file is only in place after rpm installation\n # So that min can install plugin zips\n # Have to decode then encode back to ascii due to mypy complains TextIO not equals to BinaryIO\n with open(min_bin_env_path, 'rb') as fp:\n min_bin_env_lines = fp.read().decode('ascii')\n\n with open(min_bin_env_path, 'wb') as fp:\n fp.write(min_bin_env_lines.replace('source', '#source').encode('ascii'))\n\n def build(self, name: str, dest: str, archive_path: str, build_cls: BuildManifest.Build) -> None:\n # extract dest and build dest are not the same, this is restoring the extract dest\n # mainly due to rpm requires several different setups compares to tarball and zip\n ext_dest = os.path.dirname(archive_path)\n min_source_path = os.path.join(ext_dest, 'usr', 'share', self.filename)\n min_dest_path = os.path.join(ext_dest, self.min_path)\n min_bin_env_path = os.path.join(min_dest_path, 'bin', f\"{self.filename}-env\")\n bundle_artifact_path: str = None\n\n # Remove env var\n logging.info('Organize folder structure before generating rpm')\n os.environ.pop('OPENSEARCH_PATH_CONF', None)\n\n # Restore config file and core folder to original location\n shutil.move(f\"{min_bin_env_path}.backup\", min_bin_env_path)\n shutil.move(min_dest_path, min_source_path)\n\n # Run bundle rpmbuild\n bundle_cmd = \" \".join(\n [\n 'rpmbuild',\n '-bb',\n f\"--define '_topdir {ext_dest}'\",\n f\"--define '_version {build_cls.version}'\",\n f\"--define '_architecture {rpm_architecture(build_cls.architecture)}'\",\n f\"{self.filename}.rpm.spec\",\n ]\n )\n\n logging.info(f\"Execute {bundle_cmd} in {ext_dest}\")\n subprocess.check_call(bundle_cmd, cwd=ext_dest, shell=True)\n\n # Move artifact to repo root before being published to {dest}\n for dirpath, dirnames, filenames in os.walk(os.path.join(ext_dest, 'RPMS')):\n for filename in [file for file in filenames if file.endswith('.rpm')]:\n bundle_artifact_path = os.path.join(dirpath, filename)\n break\n\n shutil.move(bundle_artifact_path, name)\n", "path": "src/assemble_workflow/bundle_rpm.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport logging\nimport os\nimport shutil\nimport subprocess\n\nfrom manifests.build_manifest import BuildManifest\nfrom system.os import rpm_architecture\n\n\nclass BundleRpm:\n\n def __init__(self, filename: str, package_path: str, min_path: str) -> None:\n self.filename = filename\n self.package_path = package_path\n self.min_path = min_path\n\n def extract(self, dest: str) -> None:\n cpio_basename = os.path.splitext(os.path.basename(self.package_path))[0]\n cpio_path = os.path.join(dest, f\"{cpio_basename}.cpio\")\n min_source_path = os.path.join(dest, 'usr', 'share', self.filename)\n min_dest_path = os.path.join(dest, self.min_path)\n min_config_path = os.path.join(dest, 'etc', self.filename)\n min_bin_env_path = os.path.join(min_dest_path, 'bin', 
f\"{self.filename}-env\")\n\n # Convert rpm to cpio so we can extract the content\n logging.info(f\"Convert rpm to cpio for extraction: {self.package_path} to {cpio_path}\")\n with open(cpio_path, 'wb') as fp:\n subprocess.check_call(\n [\n 'rpm2cpio',\n self.package_path,\n ],\n stdout=fp,\n cwd=dest,\n )\n\n # Extract cpio archive based on the rpm package\n logging.info(f\"Extract cpio {cpio_path} content to {dest}\")\n with open(cpio_path, 'rb') as fp:\n subprocess.check_call(\n [\n 'cpio',\n '-imdv',\n ],\n stdin=fp,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n cwd=dest,\n )\n\n # Move core folder destination so plugin install can proceed\n logging.info(f\"Move {min_source_path} to {min_dest_path} for plugin installation\")\n shutil.move(min_source_path, min_dest_path)\n\n # Multiple modifications and env vars setups before install plugins\n # As bin/opensearch-env is different between archive and package\n # https://github.com/opensearch-project/OpenSearch/issues/2092\n os.environ[f\"{self.filename.upper()}_PATH_CONF\"] = min_config_path\n\n if os.path.exists(min_bin_env_path):\n # Backup original file\n shutil.copy2(min_bin_env_path, f\"{min_bin_env_path}.backup\")\n # Prevent sourcing as file is only in place after rpm installation\n # So that min can install plugin zips\n # Have to decode then encode back to ascii due to mypy complains TextIO not equals to BinaryIO\n with open(min_bin_env_path, 'rb') as fp:\n min_bin_env_lines = fp.read().decode('ascii')\n\n with open(min_bin_env_path, 'wb') as fp:\n fp.write(min_bin_env_lines.replace('source', '#source').encode('ascii'))\n\n def build(self, name: str, dest: str, archive_path: str, build_cls: BuildManifest.Build) -> None:\n # extract dest and build dest are not the same, this is restoring the extract dest\n # mainly due to rpm requires several different setups compares to tarball and zip\n ext_dest = os.path.dirname(archive_path)\n min_source_path = os.path.join(ext_dest, 'usr', 'share', self.filename)\n min_dest_path = os.path.join(ext_dest, self.min_path)\n min_bin_env_path = os.path.join(min_dest_path, 'bin', f\"{self.filename}-env\")\n bundle_artifact_path: str = None\n\n # Remove env var\n logging.info('Organize folder structure before generating rpm')\n os.environ.pop('OPENSEARCH_PATH_CONF', None)\n\n # Restore config file and core folder to original location\n if os.path.exists(f\"{min_bin_env_path}.backup\"):\n logging.info(f\"Restore {min_bin_env_path}.backup to {min_bin_env_path}\")\n shutil.move(f\"{min_bin_env_path}.backup\", min_bin_env_path)\n\n shutil.move(min_dest_path, min_source_path)\n\n # Run bundle rpmbuild\n bundle_cmd = \" \".join(\n [\n 'rpmbuild',\n '-bb',\n f\"--define '_topdir {ext_dest}'\",\n f\"--define '_version {build_cls.version}'\",\n f\"--define '_architecture {rpm_architecture(build_cls.architecture)}'\",\n f\"{self.filename}.rpm.spec\",\n ]\n )\n\n logging.info(f\"Execute {bundle_cmd} in {ext_dest}\")\n subprocess.check_call(bundle_cmd, cwd=ext_dest, shell=True)\n\n # Move artifact to repo root before being published to {dest}\n for dirpath, dirnames, filenames in os.walk(os.path.join(ext_dest, 'RPMS')):\n for filename in [file for file in filenames if file.endswith('.rpm')]:\n bundle_artifact_path = os.path.join(dirpath, filename)\n break\n\n shutil.move(bundle_artifact_path, name)\n", "path": "src/assemble_workflow/bundle_rpm.py"}]}
| 1,842 | 180 |
gh_patches_debug_14519
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-677
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Silent error when calling datastore.put() on a non-list
Repro instructions:
```
ent = datastore.Entity(key=datastore.Key('test',1))
datastore.put(ent)
```
The entity is not put in the datastore, and the call completes successfully.
--- END ISSUE ---
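To make the failure mode concrete, here is a small, hypothetical snippet of the calling pattern at issue. It assumes the `gcloud.datastore` module is imported as `datastore` and that a default dataset and connection are already configured; `put()` expects a sequence of entities, so the single-entity call mirrors the silent no-op reported above, while wrapping the entity in a list saves it as expected.

```python
from gcloud import datastore

ent = datastore.Entity(key=datastore.Key('test', 1))

datastore.put(ent)    # single Entity: nothing is saved and no error is raised (the bug)
datastore.put([ent])  # sequence of entities: saved as expected
```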
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcloud/datastore/api.py`
Content:
```
1 # Copyright 2014 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Methods for interacting with Google Cloud Datastore.
16
17 Allows interacting with the datastore via user-friendly Key, Entity and
18 Query objects rather than via protobufs.
19 """
20
21 from gcloud.datastore import _implicit_environ
22 from gcloud.datastore.batch import Batch
23 from gcloud.datastore.transaction import Transaction
24 from gcloud.datastore import helpers
25
26
27 _MAX_LOOPS = 128
28 """Maximum number of iterations to wait for deferred keys."""
29
30
31 def _require_dataset_id(dataset_id=None, first_key=None):
32 """Infer a dataset ID from the environment, if not passed explicitly.
33
34 Order of precedence:
35
36 - Passed `dataset_id` (if not None).
37 - `dataset_id` of current batch / transaction (if current exists).
38 - `dataset_id` of first key
39 - `dataset_id` inferred from the environment (if `set_default_dataset_id`
40 has been called).
41
42 :type dataset_id: string
43 :param dataset_id: Optional.
44
45 :type first_key: :class:`gcloud.datastore.key.Key` or None
46 :param first_key: Optional: first key being manipulated.
47
48 :rtype: string
49 :returns: A dataset ID based on the current environment.
50 :raises: :class:`EnvironmentError` if ``dataset_id`` is ``None``,
51 and cannot be inferred from the environment.
52 """
53 if dataset_id is not None:
54 return dataset_id
55 top = Batch.current()
56 if top is not None:
57 return top.dataset_id
58 if first_key is not None:
59 return first_key.dataset_id
60
61 dataset_id = _implicit_environ.get_default_dataset_id()
62 if dataset_id is None:
63 raise EnvironmentError('Dataset ID could not be inferred.')
64 return dataset_id
65
66
67 def _require_connection(connection=None):
68 """Infer a connection from the environment, if not passed explicitly.
69
70 :type connection: :class:`gcloud.datastore.connection.Connection`
71 :param connection: Optional.
72
73 :rtype: :class:`gcloud.datastore.connection.Connection`
74 :returns: A connection based on the current environment.
75 :raises: :class:`EnvironmentError` if ``connection`` is ``None``, and
76 cannot be inferred from the environment.
77 """
78 if connection is None:
79 top = Batch.current()
80 if top is not None:
81 connection = top.connection
82 else:
83 connection = _implicit_environ.get_default_connection()
84 if connection is None:
85 raise EnvironmentError('Connection could not be inferred.')
86 return connection
87
88
89 def _extended_lookup(connection, dataset_id, key_pbs,
90 missing=None, deferred=None,
91 eventual=False, transaction_id=None):
92 """Repeat lookup until all keys found (unless stop requested).
93
94 Helper method for :func:`get`.
95
96 :type connection: :class:`gcloud.datastore.connection.Connection`
97 :param connection: The connection used to connect to datastore.
98
99 :type dataset_id: string
100 :param dataset_id: The ID of the dataset of which to make the request.
101
102 :type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
103 :param key_pbs: The keys to retrieve from the datastore.
104
105 :type missing: an empty list or None.
106 :param missing: If a list is passed, the key-only entity protobufs
107 returned by the backend as "missing" will be copied
108 into it. Use only as a keyword param.
109
110 :type deferred: an empty list or None.
111 :param deferred: If a list is passed, the key protobufs returned
112 by the backend as "deferred" will be copied into it.
113 Use only as a keyword param.
114
115 :type eventual: boolean
116 :param eventual: If False (the default), request ``STRONG`` read
117 consistency. If True, request ``EVENTUAL`` read
118 consistency.
119
120 :type transaction_id: string
121 :param transaction_id: If passed, make the request in the scope of
122 the given transaction. Incompatible with
123 ``eventual==True``.
124
125 :rtype: list of :class:`gcloud.datastore._datastore_v1_pb2.Entity`
126 :returns: The requested entities.
127 :raises: :class:`ValueError` if missing / deferred are not null or
128 empty list.
129 """
130 if missing is not None and missing != []:
131 raise ValueError('missing must be None or an empty list')
132
133 if deferred is not None and deferred != []:
134 raise ValueError('deferred must be None or an empty list')
135
136 results = []
137
138 loop_num = 0
139 while loop_num < _MAX_LOOPS: # loop against possible deferred.
140 loop_num += 1
141
142 results_found, missing_found, deferred_found = connection.lookup(
143 dataset_id=dataset_id,
144 key_pbs=key_pbs,
145 eventual=eventual,
146 transaction_id=transaction_id,
147 )
148
149 results.extend(results_found)
150
151 if missing is not None:
152 missing.extend(missing_found)
153
154 if deferred is not None:
155 deferred.extend(deferred_found)
156 break
157
158 if len(deferred_found) == 0:
159 break
160
161 # We have deferred keys, and the user didn't ask to know about
162 # them, so retry (but only with the deferred ones).
163 key_pbs = deferred_found
164
165 return results
166
167
168 def get(keys, missing=None, deferred=None, connection=None, dataset_id=None):
169 """Retrieves entities, along with their attributes.
170
171 :type keys: list of :class:`gcloud.datastore.key.Key`
172 :param keys: The keys to be retrieved from the datastore.
173
174 :type missing: an empty list or None.
175 :param missing: If a list is passed, the key-only entities returned
176 by the backend as "missing" will be copied into it.
177 Use only as a keyword param.
178
179 :type deferred: an empty list or None.
180 :param deferred: If a list is passed, the keys returned
181 by the backend as "deferred" will be copied into it.
182 Use only as a keyword param.
183
184 :type connection: :class:`gcloud.datastore.connection.Connection`
185 :param connection: Optional. The connection used to connect to datastore.
186 If not passed, inferred from the environment.
187
188 :type dataset_id: :class:`gcloud.datastore.connection.Connection`
189 :param dataset_id: Optional. The dataset ID used to connect to datastore.
190 If not passed, inferred from the environment.
191
192 :rtype: list of :class:`gcloud.datastore.entity.Entity`
193 :returns: The requested entities.
194 :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,
195 and cannot be inferred from the environment. ValueError if
196 one or more of ``keys`` has a dataset ID which does not match
197 the passed / inferred dataset ID.
198 """
199 if not keys:
200 return []
201
202 connection = _require_connection(connection)
203 dataset_id = _require_dataset_id(dataset_id, keys[0])
204
205 if list(set([key.dataset_id for key in keys])) != [dataset_id]:
206 raise ValueError('Keys do not match dataset ID')
207
208 transaction = Transaction.current()
209
210 entity_pbs = _extended_lookup(
211 connection,
212 dataset_id=dataset_id,
213 key_pbs=[k.to_protobuf() for k in keys],
214 missing=missing,
215 deferred=deferred,
216 transaction_id=transaction and transaction.id,
217 )
218
219 if missing is not None:
220 missing[:] = [
221 helpers.entity_from_protobuf(missed_pb)
222 for missed_pb in missing]
223
224 if deferred is not None:
225 deferred[:] = [
226 helpers.key_from_protobuf(deferred_pb)
227 for deferred_pb in deferred]
228
229 entities = []
230 for entity_pb in entity_pbs:
231 entities.append(helpers.entity_from_protobuf(entity_pb))
232
233 return entities
234
235
236 def put(entities, connection=None, dataset_id=None):
237 """Save the entities in the Cloud Datastore.
238
239 :type entities: list of :class:`gcloud.datastore.entity.Entity`
240 :param entities: The entities to be saved to the datastore.
241
242 :type connection: :class:`gcloud.datastore.connection.Connection`
243 :param connection: Optional connection used to connect to datastore.
244 If not passed, inferred from the environment.
245
246 :type dataset_id: :class:`gcloud.datastore.connection.Connection`
247 :param dataset_id: Optional. The dataset ID used to connect to datastore.
248 If not passed, inferred from the environment.
249
250 :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,
251 and cannot be inferred from the environment. ValueError if
252 one or more entities has a key with a dataset ID not matching
253 the passed / inferred dataset ID.
254 """
255 if not entities:
256 return
257
258 connection = _require_connection(connection)
259 dataset_id = _require_dataset_id(dataset_id, entities[0].key)
260
261 current = Batch.current()
262 in_batch = current is not None
263 if not in_batch:
264 current = Batch(dataset_id=dataset_id, connection=connection)
265 for entity in entities:
266 current.put(entity)
267 if not in_batch:
268 current.commit()
269
270
271 def delete(keys, connection=None, dataset_id=None):
272 """Delete the keys in the Cloud Datastore.
273
274 :type keys: list of :class:`gcloud.datastore.key.Key`
275 :param keys: The keys to be deleted from the datastore.
276
277 :type connection: :class:`gcloud.datastore.connection.Connection`
278 :param connection: Optional connection used to connect to datastore.
279 If not passed, inferred from the environment.
280
281 :type dataset_id: :class:`gcloud.datastore.connection.Connection`
282 :param dataset_id: Optional. The dataset ID used to connect to datastore.
283 If not passed, inferred from the environment.
284
285 :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,
286 and cannot be inferred from the environment. ValueError if
287 one or more keys has a dataset ID not matching the passed /
288 inferred dataset ID.
289 """
290 if not keys:
291 return
292
293 connection = _require_connection(connection)
294 dataset_id = _require_dataset_id(dataset_id, keys[0])
295
296 # We allow partial keys to attempt a delete, the backend will fail.
297 current = Batch.current()
298 in_batch = current is not None
299 if not in_batch:
300 current = Batch(dataset_id=dataset_id, connection=connection)
301 for key in keys:
302 current.delete(key)
303 if not in_batch:
304 current.commit()
305
306
307 def allocate_ids(incomplete_key, num_ids, connection=None):
308 """Allocates a list of IDs from a partial key.
309
310 :type incomplete_key: A :class:`gcloud.datastore.key.Key`
311 :param incomplete_key: Partial key to use as base for allocated IDs.
312
313 :type num_ids: integer
314 :param num_ids: The number of IDs to allocate.
315
316 :type connection: :class:`gcloud.datastore.connection.Connection`
317 :param connection: Optional. The connection used to connect to datastore.
318
319 :rtype: list of :class:`gcloud.datastore.key.Key`
320 :returns: The (complete) keys allocated with ``incomplete_key`` as root.
321 :raises: :class:`ValueError` if ``incomplete_key`` is not a partial key.
322 """
323 connection = _require_connection(connection)
324
325 if not incomplete_key.is_partial:
326 raise ValueError(('Key is not partial.', incomplete_key))
327
328 incomplete_key_pb = incomplete_key.to_protobuf()
329 incomplete_key_pbs = [incomplete_key_pb] * num_ids
330
331 allocated_key_pbs = connection.allocate_ids(incomplete_key.dataset_id,
332 incomplete_key_pbs)
333 allocated_ids = [allocated_key_pb.path_element[-1].id
334 for allocated_key_pb in allocated_key_pbs]
335 return [incomplete_key.completed_key(allocated_id)
336 for allocated_id in allocated_ids]
337
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gcloud/datastore/api.py b/gcloud/datastore/api.py
--- a/gcloud/datastore/api.py
+++ b/gcloud/datastore/api.py
@@ -20,6 +20,7 @@
 
 from gcloud.datastore import _implicit_environ
 from gcloud.datastore.batch import Batch
+from gcloud.datastore.entity import Entity
 from gcloud.datastore.transaction import Transaction
 from gcloud.datastore import helpers
 
@@ -252,6 +253,9 @@
              one or more entities has a key with a dataset ID not matching
              the passed / inferred dataset ID.
     """
+    if isinstance(entities, Entity):
+        raise ValueError("Pass a sequence of entities")
+
     if not entities:
         return
 
|
{"golden_diff": "diff --git a/gcloud/datastore/api.py b/gcloud/datastore/api.py\n--- a/gcloud/datastore/api.py\n+++ b/gcloud/datastore/api.py\n@@ -20,6 +20,7 @@\n \n from gcloud.datastore import _implicit_environ\n from gcloud.datastore.batch import Batch\n+from gcloud.datastore.entity import Entity\n from gcloud.datastore.transaction import Transaction\n from gcloud.datastore import helpers\n \n@@ -252,6 +253,9 @@\n one or more entities has a key with a dataset ID not matching\n the passed / inferred dataset ID.\n \"\"\"\n+ if isinstance(entities, Entity):\n+ raise ValueError(\"Pass a sequence of entities\")\n+\n if not entities:\n return\n", "issue": "Silent error when calling datastore.put() on nonlist\nRepro instructions:\n\n```\nent = datastore.Entity(key=datastore.Key('test',1))\ndatastore.put(ent)\n```\n\nEntity is not put in the datastore, and call completes successfully. \n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Methods for interacting with Google Cloud Datastore.\n\nAllows interacting with the datastore via user-friendly Key, Entity and\nQuery objects rather than via protobufs.\n\"\"\"\n\nfrom gcloud.datastore import _implicit_environ\nfrom gcloud.datastore.batch import Batch\nfrom gcloud.datastore.transaction import Transaction\nfrom gcloud.datastore import helpers\n\n\n_MAX_LOOPS = 128\n\"\"\"Maximum number of iterations to wait for deferred keys.\"\"\"\n\n\ndef _require_dataset_id(dataset_id=None, first_key=None):\n \"\"\"Infer a dataset ID from the environment, if not passed explicitly.\n\n Order of precedence:\n\n - Passed `dataset_id` (if not None).\n - `dataset_id` of current batch / transaction (if current exists).\n - `dataset_id` of first key\n - `dataset_id` inferred from the environment (if `set_default_dataset_id`\n has been called).\n\n :type dataset_id: string\n :param dataset_id: Optional.\n\n :type first_key: :class:`gcloud.datastore.key.Key` or None\n :param first_key: Optional: first key being manipulated.\n\n :rtype: string\n :returns: A dataset ID based on the current environment.\n :raises: :class:`EnvironmentError` if ``dataset_id`` is ``None``,\n and cannot be inferred from the environment.\n \"\"\"\n if dataset_id is not None:\n return dataset_id\n top = Batch.current()\n if top is not None:\n return top.dataset_id\n if first_key is not None:\n return first_key.dataset_id\n\n dataset_id = _implicit_environ.get_default_dataset_id()\n if dataset_id is None:\n raise EnvironmentError('Dataset ID could not be inferred.')\n return dataset_id\n\n\ndef _require_connection(connection=None):\n \"\"\"Infer a connection from the environment, if not passed explicitly.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional.\n\n :rtype: :class:`gcloud.datastore.connection.Connection`\n :returns: A connection based on the current environment.\n :raises: :class:`EnvironmentError` if ``connection`` is ``None``, and\n cannot be 
inferred from the environment.\n \"\"\"\n if connection is None:\n top = Batch.current()\n if top is not None:\n connection = top.connection\n else:\n connection = _implicit_environ.get_default_connection()\n if connection is None:\n raise EnvironmentError('Connection could not be inferred.')\n return connection\n\n\ndef _extended_lookup(connection, dataset_id, key_pbs,\n missing=None, deferred=None,\n eventual=False, transaction_id=None):\n \"\"\"Repeat lookup until all keys found (unless stop requested).\n\n Helper method for :func:`get`.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: The connection used to connect to datastore.\n\n :type dataset_id: string\n :param dataset_id: The ID of the dataset of which to make the request.\n\n :type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`\n :param key_pbs: The keys to retrieve from the datastore.\n\n :type missing: an empty list or None.\n :param missing: If a list is passed, the key-only entity protobufs\n returned by the backend as \"missing\" will be copied\n into it. Use only as a keyword param.\n\n :type deferred: an empty list or None.\n :param deferred: If a list is passed, the key protobufs returned\n by the backend as \"deferred\" will be copied into it.\n Use only as a keyword param.\n\n :type eventual: boolean\n :param eventual: If False (the default), request ``STRONG`` read\n consistency. If True, request ``EVENTUAL`` read\n consistency.\n\n :type transaction_id: string\n :param transaction_id: If passed, make the request in the scope of\n the given transaction. Incompatible with\n ``eventual==True``.\n\n :rtype: list of :class:`gcloud.datastore._datastore_v1_pb2.Entity`\n :returns: The requested entities.\n :raises: :class:`ValueError` if missing / deferred are not null or\n empty list.\n \"\"\"\n if missing is not None and missing != []:\n raise ValueError('missing must be None or an empty list')\n\n if deferred is not None and deferred != []:\n raise ValueError('deferred must be None or an empty list')\n\n results = []\n\n loop_num = 0\n while loop_num < _MAX_LOOPS: # loop against possible deferred.\n loop_num += 1\n\n results_found, missing_found, deferred_found = connection.lookup(\n dataset_id=dataset_id,\n key_pbs=key_pbs,\n eventual=eventual,\n transaction_id=transaction_id,\n )\n\n results.extend(results_found)\n\n if missing is not None:\n missing.extend(missing_found)\n\n if deferred is not None:\n deferred.extend(deferred_found)\n break\n\n if len(deferred_found) == 0:\n break\n\n # We have deferred keys, and the user didn't ask to know about\n # them, so retry (but only with the deferred ones).\n key_pbs = deferred_found\n\n return results\n\n\ndef get(keys, missing=None, deferred=None, connection=None, dataset_id=None):\n \"\"\"Retrieves entities, along with their attributes.\n\n :type keys: list of :class:`gcloud.datastore.key.Key`\n :param keys: The keys to be retrieved from the datastore.\n\n :type missing: an empty list or None.\n :param missing: If a list is passed, the key-only entities returned\n by the backend as \"missing\" will be copied into it.\n Use only as a keyword param.\n\n :type deferred: an empty list or None.\n :param deferred: If a list is passed, the keys returned\n by the backend as \"deferred\" will be copied into it.\n Use only as a keyword param.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional. 
The connection used to connect to datastore.\n If not passed, inferred from the environment.\n\n :type dataset_id: :class:`gcloud.datastore.connection.Connection`\n :param dataset_id: Optional. The dataset ID used to connect to datastore.\n If not passed, inferred from the environment.\n\n :rtype: list of :class:`gcloud.datastore.entity.Entity`\n :returns: The requested entities.\n :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,\n and cannot be inferred from the environment. ValueError if\n one or more of ``keys`` has a dataset ID which does not match\n the passed / inferred dataset ID.\n \"\"\"\n if not keys:\n return []\n\n connection = _require_connection(connection)\n dataset_id = _require_dataset_id(dataset_id, keys[0])\n\n if list(set([key.dataset_id for key in keys])) != [dataset_id]:\n raise ValueError('Keys do not match dataset ID')\n\n transaction = Transaction.current()\n\n entity_pbs = _extended_lookup(\n connection,\n dataset_id=dataset_id,\n key_pbs=[k.to_protobuf() for k in keys],\n missing=missing,\n deferred=deferred,\n transaction_id=transaction and transaction.id,\n )\n\n if missing is not None:\n missing[:] = [\n helpers.entity_from_protobuf(missed_pb)\n for missed_pb in missing]\n\n if deferred is not None:\n deferred[:] = [\n helpers.key_from_protobuf(deferred_pb)\n for deferred_pb in deferred]\n\n entities = []\n for entity_pb in entity_pbs:\n entities.append(helpers.entity_from_protobuf(entity_pb))\n\n return entities\n\n\ndef put(entities, connection=None, dataset_id=None):\n \"\"\"Save the entities in the Cloud Datastore.\n\n :type entities: list of :class:`gcloud.datastore.entity.Entity`\n :param entities: The entities to be saved to the datastore.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional connection used to connect to datastore.\n If not passed, inferred from the environment.\n\n :type dataset_id: :class:`gcloud.datastore.connection.Connection`\n :param dataset_id: Optional. The dataset ID used to connect to datastore.\n If not passed, inferred from the environment.\n\n :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,\n and cannot be inferred from the environment. ValueError if\n one or more entities has a key with a dataset ID not matching\n the passed / inferred dataset ID.\n \"\"\"\n if not entities:\n return\n\n connection = _require_connection(connection)\n dataset_id = _require_dataset_id(dataset_id, entities[0].key)\n\n current = Batch.current()\n in_batch = current is not None\n if not in_batch:\n current = Batch(dataset_id=dataset_id, connection=connection)\n for entity in entities:\n current.put(entity)\n if not in_batch:\n current.commit()\n\n\ndef delete(keys, connection=None, dataset_id=None):\n \"\"\"Delete the keys in the Cloud Datastore.\n\n :type keys: list of :class:`gcloud.datastore.key.Key`\n :param keys: The keys to be deleted from the datastore.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional connection used to connect to datastore.\n If not passed, inferred from the environment.\n\n :type dataset_id: :class:`gcloud.datastore.connection.Connection`\n :param dataset_id: Optional. The dataset ID used to connect to datastore.\n If not passed, inferred from the environment.\n\n :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,\n and cannot be inferred from the environment. 
ValueError if\n one or more keys has a dataset ID not matching the passed /\n inferred dataset ID.\n \"\"\"\n if not keys:\n return\n\n connection = _require_connection(connection)\n dataset_id = _require_dataset_id(dataset_id, keys[0])\n\n # We allow partial keys to attempt a delete, the backend will fail.\n current = Batch.current()\n in_batch = current is not None\n if not in_batch:\n current = Batch(dataset_id=dataset_id, connection=connection)\n for key in keys:\n current.delete(key)\n if not in_batch:\n current.commit()\n\n\ndef allocate_ids(incomplete_key, num_ids, connection=None):\n \"\"\"Allocates a list of IDs from a partial key.\n\n :type incomplete_key: A :class:`gcloud.datastore.key.Key`\n :param incomplete_key: Partial key to use as base for allocated IDs.\n\n :type num_ids: integer\n :param num_ids: The number of IDs to allocate.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional. The connection used to connect to datastore.\n\n :rtype: list of :class:`gcloud.datastore.key.Key`\n :returns: The (complete) keys allocated with ``incomplete_key`` as root.\n :raises: :class:`ValueError` if ``incomplete_key`` is not a partial key.\n \"\"\"\n connection = _require_connection(connection)\n\n if not incomplete_key.is_partial:\n raise ValueError(('Key is not partial.', incomplete_key))\n\n incomplete_key_pb = incomplete_key.to_protobuf()\n incomplete_key_pbs = [incomplete_key_pb] * num_ids\n\n allocated_key_pbs = connection.allocate_ids(incomplete_key.dataset_id,\n incomplete_key_pbs)\n allocated_ids = [allocated_key_pb.path_element[-1].id\n for allocated_key_pb in allocated_key_pbs]\n return [incomplete_key.completed_key(allocated_id)\n for allocated_id in allocated_ids]\n", "path": "gcloud/datastore/api.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Methods for interacting with Google Cloud Datastore.\n\nAllows interacting with the datastore via user-friendly Key, Entity and\nQuery objects rather than via protobufs.\n\"\"\"\n\nfrom gcloud.datastore import _implicit_environ\nfrom gcloud.datastore.batch import Batch\nfrom gcloud.datastore.entity import Entity\nfrom gcloud.datastore.transaction import Transaction\nfrom gcloud.datastore import helpers\n\n\n_MAX_LOOPS = 128\n\"\"\"Maximum number of iterations to wait for deferred keys.\"\"\"\n\n\ndef _require_dataset_id(dataset_id=None, first_key=None):\n \"\"\"Infer a dataset ID from the environment, if not passed explicitly.\n\n Order of precedence:\n\n - Passed `dataset_id` (if not None).\n - `dataset_id` of current batch / transaction (if current exists).\n - `dataset_id` of first key\n - `dataset_id` inferred from the environment (if `set_default_dataset_id`\n has been called).\n\n :type dataset_id: string\n :param dataset_id: Optional.\n\n :type first_key: :class:`gcloud.datastore.key.Key` or None\n :param first_key: Optional: first key being manipulated.\n\n :rtype: string\n :returns: A dataset ID based on the current environment.\n :raises: :class:`EnvironmentError` if ``dataset_id`` is ``None``,\n and cannot be inferred from the environment.\n \"\"\"\n if dataset_id is not None:\n return dataset_id\n top = Batch.current()\n if top is not None:\n return top.dataset_id\n if first_key is not None:\n return first_key.dataset_id\n\n dataset_id = _implicit_environ.get_default_dataset_id()\n if dataset_id is None:\n raise EnvironmentError('Dataset ID could not be inferred.')\n return dataset_id\n\n\ndef _require_connection(connection=None):\n \"\"\"Infer a connection from the environment, if not passed explicitly.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional.\n\n :rtype: :class:`gcloud.datastore.connection.Connection`\n :returns: A connection based on the current environment.\n :raises: :class:`EnvironmentError` if ``connection`` is ``None``, and\n cannot be inferred from the environment.\n \"\"\"\n if connection is None:\n top = Batch.current()\n if top is not None:\n connection = top.connection\n else:\n connection = _implicit_environ.get_default_connection()\n if connection is None:\n raise EnvironmentError('Connection could not be inferred.')\n return connection\n\n\ndef _extended_lookup(connection, dataset_id, key_pbs,\n missing=None, deferred=None,\n eventual=False, transaction_id=None):\n \"\"\"Repeat lookup until all keys found (unless stop requested).\n\n Helper method for :func:`get`.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: The connection used to connect to datastore.\n\n :type dataset_id: string\n :param dataset_id: The ID of the dataset of which to make the request.\n\n :type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`\n :param key_pbs: The keys to retrieve from the 
datastore.\n\n :type missing: an empty list or None.\n :param missing: If a list is passed, the key-only entity protobufs\n returned by the backend as \"missing\" will be copied\n into it. Use only as a keyword param.\n\n :type deferred: an empty list or None.\n :param deferred: If a list is passed, the key protobufs returned\n by the backend as \"deferred\" will be copied into it.\n Use only as a keyword param.\n\n :type eventual: boolean\n :param eventual: If False (the default), request ``STRONG`` read\n consistency. If True, request ``EVENTUAL`` read\n consistency.\n\n :type transaction_id: string\n :param transaction_id: If passed, make the request in the scope of\n the given transaction. Incompatible with\n ``eventual==True``.\n\n :rtype: list of :class:`gcloud.datastore._datastore_v1_pb2.Entity`\n :returns: The requested entities.\n :raises: :class:`ValueError` if missing / deferred are not null or\n empty list.\n \"\"\"\n if missing is not None and missing != []:\n raise ValueError('missing must be None or an empty list')\n\n if deferred is not None and deferred != []:\n raise ValueError('deferred must be None or an empty list')\n\n results = []\n\n loop_num = 0\n while loop_num < _MAX_LOOPS: # loop against possible deferred.\n loop_num += 1\n\n results_found, missing_found, deferred_found = connection.lookup(\n dataset_id=dataset_id,\n key_pbs=key_pbs,\n eventual=eventual,\n transaction_id=transaction_id,\n )\n\n results.extend(results_found)\n\n if missing is not None:\n missing.extend(missing_found)\n\n if deferred is not None:\n deferred.extend(deferred_found)\n break\n\n if len(deferred_found) == 0:\n break\n\n # We have deferred keys, and the user didn't ask to know about\n # them, so retry (but only with the deferred ones).\n key_pbs = deferred_found\n\n return results\n\n\ndef get(keys, missing=None, deferred=None, connection=None, dataset_id=None):\n \"\"\"Retrieves entities, along with their attributes.\n\n :type keys: list of :class:`gcloud.datastore.key.Key`\n :param keys: The keys to be retrieved from the datastore.\n\n :type missing: an empty list or None.\n :param missing: If a list is passed, the key-only entities returned\n by the backend as \"missing\" will be copied into it.\n Use only as a keyword param.\n\n :type deferred: an empty list or None.\n :param deferred: If a list is passed, the keys returned\n by the backend as \"deferred\" will be copied into it.\n Use only as a keyword param.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional. The connection used to connect to datastore.\n If not passed, inferred from the environment.\n\n :type dataset_id: :class:`gcloud.datastore.connection.Connection`\n :param dataset_id: Optional. The dataset ID used to connect to datastore.\n If not passed, inferred from the environment.\n\n :rtype: list of :class:`gcloud.datastore.entity.Entity`\n :returns: The requested entities.\n :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,\n and cannot be inferred from the environment. 
ValueError if\n one or more of ``keys`` has a dataset ID which does not match\n the passed / inferred dataset ID.\n \"\"\"\n if not keys:\n return []\n\n connection = _require_connection(connection)\n dataset_id = _require_dataset_id(dataset_id, keys[0])\n\n if list(set([key.dataset_id for key in keys])) != [dataset_id]:\n raise ValueError('Keys do not match dataset ID')\n\n transaction = Transaction.current()\n\n entity_pbs = _extended_lookup(\n connection,\n dataset_id=dataset_id,\n key_pbs=[k.to_protobuf() for k in keys],\n missing=missing,\n deferred=deferred,\n transaction_id=transaction and transaction.id,\n )\n\n if missing is not None:\n missing[:] = [\n helpers.entity_from_protobuf(missed_pb)\n for missed_pb in missing]\n\n if deferred is not None:\n deferred[:] = [\n helpers.key_from_protobuf(deferred_pb)\n for deferred_pb in deferred]\n\n entities = []\n for entity_pb in entity_pbs:\n entities.append(helpers.entity_from_protobuf(entity_pb))\n\n return entities\n\n\ndef put(entities, connection=None, dataset_id=None):\n \"\"\"Save the entities in the Cloud Datastore.\n\n :type entities: list of :class:`gcloud.datastore.entity.Entity`\n :param entities: The entities to be saved to the datastore.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional connection used to connect to datastore.\n If not passed, inferred from the environment.\n\n :type dataset_id: :class:`gcloud.datastore.connection.Connection`\n :param dataset_id: Optional. The dataset ID used to connect to datastore.\n If not passed, inferred from the environment.\n\n :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,\n and cannot be inferred from the environment. ValueError if\n one or more entities has a key with a dataset ID not matching\n the passed / inferred dataset ID.\n \"\"\"\n if isinstance(entities, Entity):\n raise ValueError(\"Pass a sequence of entities\")\n\n if not entities:\n return\n\n connection = _require_connection(connection)\n dataset_id = _require_dataset_id(dataset_id, entities[0].key)\n\n current = Batch.current()\n in_batch = current is not None\n if not in_batch:\n current = Batch(dataset_id=dataset_id, connection=connection)\n for entity in entities:\n current.put(entity)\n if not in_batch:\n current.commit()\n\n\ndef delete(keys, connection=None, dataset_id=None):\n \"\"\"Delete the keys in the Cloud Datastore.\n\n :type keys: list of :class:`gcloud.datastore.key.Key`\n :param keys: The keys to be deleted from the datastore.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional connection used to connect to datastore.\n If not passed, inferred from the environment.\n\n :type dataset_id: :class:`gcloud.datastore.connection.Connection`\n :param dataset_id: Optional. The dataset ID used to connect to datastore.\n If not passed, inferred from the environment.\n\n :raises: EnvironmentError if ``connection`` or ``dataset_id`` not passed,\n and cannot be inferred from the environment. 
ValueError if\n one or more keys has a dataset ID not matching the passed /\n inferred dataset ID.\n \"\"\"\n if not keys:\n return\n\n connection = _require_connection(connection)\n dataset_id = _require_dataset_id(dataset_id, keys[0])\n\n # We allow partial keys to attempt a delete, the backend will fail.\n current = Batch.current()\n in_batch = current is not None\n if not in_batch:\n current = Batch(dataset_id=dataset_id, connection=connection)\n for key in keys:\n current.delete(key)\n if not in_batch:\n current.commit()\n\n\ndef allocate_ids(incomplete_key, num_ids, connection=None):\n \"\"\"Allocates a list of IDs from a partial key.\n\n :type incomplete_key: A :class:`gcloud.datastore.key.Key`\n :param incomplete_key: Partial key to use as base for allocated IDs.\n\n :type num_ids: integer\n :param num_ids: The number of IDs to allocate.\n\n :type connection: :class:`gcloud.datastore.connection.Connection`\n :param connection: Optional. The connection used to connect to datastore.\n\n :rtype: list of :class:`gcloud.datastore.key.Key`\n :returns: The (complete) keys allocated with ``incomplete_key`` as root.\n :raises: :class:`ValueError` if ``incomplete_key`` is not a partial key.\n \"\"\"\n connection = _require_connection(connection)\n\n if not incomplete_key.is_partial:\n raise ValueError(('Key is not partial.', incomplete_key))\n\n incomplete_key_pb = incomplete_key.to_protobuf()\n incomplete_key_pbs = [incomplete_key_pb] * num_ids\n\n allocated_key_pbs = connection.allocate_ids(incomplete_key.dataset_id,\n incomplete_key_pbs)\n allocated_ids = [allocated_key_pb.path_element[-1].id\n for allocated_key_pb in allocated_key_pbs]\n return [incomplete_key.completed_key(allocated_id)\n for allocated_id in allocated_ids]\n", "path": "gcloud/datastore/api.py"}]}
| 3,958 | 163 |
gh_patches_debug_18748
|
rasdani/github-patches
|
git_diff
|
microsoft__PubSec-Info-Assistant-356
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Text Enrichment function not quoting blob paths correctly
We have some files with percentage (%) symbols in them, which appear to cause an issue when getting to the Text Enrichment stage of the Function App due to the way the `get_blob_and_sas` function works. Example file name: `Unemployment rate back up to 3.7% in October _ Australian Bureau of Statistics.pdf`
I would suggest replacing the code that manually substitutes spaces (below) with a proper URL quoting function like `blob_path = urllib.parse.quote(blob_path)`
https://github.com/microsoft/PubSec-Info-Assistant/blob/7fa4561652211b023965d4522b2bfd7168af4060/functions/shared_code/utilities_helper.py#L52
--- END ISSUE ---
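For context, a short illustration of the difference between the manual space substitution and `urllib.parse.quote`; the blob path below is hypothetical and is only meant to show that `%` (and other reserved characters) get percent-encoded as well as spaces.

```python
import urllib.parse

blob_path = "content/Unemployment rate back up to 3.7% in October.pdf"

# Manual substitution only handles spaces; the raw '%' is left in the URL.
print(blob_path.replace(" ", "%20"))
# content/Unemployment%20rate%20back%20up%20to%203.7%%20in%20October.pdf

# urllib.parse.quote() encodes spaces and '%' (and keeps '/' by default).
print(urllib.parse.quote(blob_path))
# content/Unemployment%20rate%20back%20up%20to%203.7%25%20in%20October.pdf
```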
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `functions/shared_code/utilities_helper.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import os
5 import logging
6 from datetime import datetime, timedelta
7 from azure.storage.blob import generate_blob_sas, BlobSasPermissions
8
9 class UtilitiesHelper:
10 """ Helper class for utility functions"""
11 def __init__(self,
12 azure_blob_storage_account,
13 azure_blob_storage_endpoint,
14 azure_blob_storage_key
15 ):
16 self.azure_blob_storage_account = azure_blob_storage_account
17 self.azure_blob_storage_endpoint = azure_blob_storage_endpoint
18 self.azure_blob_storage_key = azure_blob_storage_key
19
20 def get_filename_and_extension(self, path):
21 """ Function to return the file name & type"""
22 # Split the path into base and extension
23 base_name = os.path.basename(path)
24 segments = path.split("/")
25 directory = "/".join(segments[1:-1]) + "/"
26 if directory == "/":
27 directory = ""
28 file_name, file_extension = os.path.splitext(base_name)
29 return file_name, file_extension, directory
30
31 def get_blob_and_sas(self, blob_path):
32 """ Function to retrieve the uri and sas token for a given blob in azure storage"""
33
34 # Get path and file name minus the root container
35 separator = "/"
36 file_path_w_name_no_cont = separator.join(
37 blob_path.split(separator)[1:])
38
39 container_name = separator.join(
40 blob_path.split(separator)[0:1])
41
42 # Gen SAS token
43 sas_token = generate_blob_sas(
44 account_name=self.azure_blob_storage_account,
45 container_name=container_name,
46 blob_name=file_path_w_name_no_cont,
47 account_key=self.azure_blob_storage_key,
48 permission=BlobSasPermissions(read=True),
49 expiry=datetime.utcnow() + timedelta(hours=1)
50 )
51 source_blob_path = f'{self.azure_blob_storage_endpoint}{blob_path}?{sas_token}'
52 source_blob_path = source_blob_path.replace(" ", "%20")
53 logging.info("Path and SAS token for file in azure storage are now generated \n")
54 return source_blob_path
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/functions/shared_code/utilities_helper.py b/functions/shared_code/utilities_helper.py
--- a/functions/shared_code/utilities_helper.py
+++ b/functions/shared_code/utilities_helper.py
@@ -3,6 +3,7 @@
import os
import logging
+import urllib.parse
from datetime import datetime, timedelta
from azure.storage.blob import generate_blob_sas, BlobSasPermissions
@@ -48,7 +49,7 @@
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
+ blob_path = urllib.parse.quote(blob_path)
source_blob_path = f'{self.azure_blob_storage_endpoint}{blob_path}?{sas_token}'
- source_blob_path = source_blob_path.replace(" ", "%20")
logging.info("Path and SAS token for file in azure storage are now generated \n")
return source_blob_path
\ No newline at end of file
|
{"golden_diff": "diff --git a/functions/shared_code/utilities_helper.py b/functions/shared_code/utilities_helper.py\n--- a/functions/shared_code/utilities_helper.py\n+++ b/functions/shared_code/utilities_helper.py\n@@ -3,6 +3,7 @@\n \n import os\n import logging\n+import urllib.parse\n from datetime import datetime, timedelta\n from azure.storage.blob import generate_blob_sas, BlobSasPermissions\n \n@@ -48,7 +49,7 @@\n permission=BlobSasPermissions(read=True),\n expiry=datetime.utcnow() + timedelta(hours=1)\n )\n+ blob_path = urllib.parse.quote(blob_path)\n source_blob_path = f'{self.azure_blob_storage_endpoint}{blob_path}?{sas_token}'\n- source_blob_path = source_blob_path.replace(\" \", \"%20\")\n logging.info(\"Path and SAS token for file in azure storage are now generated \\n\")\n return source_blob_path\n\\ No newline at end of file\n", "issue": "Text Enrichment function not quoting blob paths correctly\nWe have some files with percentage (%) symbols in them, which appear to cause an issue when getting to the Text Enrichment stage of the Function App due to the way the `get_blob_and_sas` function works. Example file name: `Unemployment rate back up to 3.7% in October _ Australian Bureau of Statistics.pdf`\r\n\r\nI would suggest replacing the code that manually substitutes spaces (below) with a proper URL quoting function like `blob_path = urllib.parse.quote(blob_path)`\r\n\r\nhttps://github.com/microsoft/PubSec-Info-Assistant/blob/7fa4561652211b023965d4522b2bfd7168af4060/functions/shared_code/utilities_helper.py#L52\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport logging\nfrom datetime import datetime, timedelta\nfrom azure.storage.blob import generate_blob_sas, BlobSasPermissions\n\nclass UtilitiesHelper:\n \"\"\" Helper class for utility functions\"\"\"\n def __init__(self,\n azure_blob_storage_account,\n azure_blob_storage_endpoint,\n azure_blob_storage_key\n ):\n self.azure_blob_storage_account = azure_blob_storage_account\n self.azure_blob_storage_endpoint = azure_blob_storage_endpoint\n self.azure_blob_storage_key = azure_blob_storage_key\n \n def get_filename_and_extension(self, path):\n \"\"\" Function to return the file name & type\"\"\"\n # Split the path into base and extension\n base_name = os.path.basename(path)\n segments = path.split(\"/\")\n directory = \"/\".join(segments[1:-1]) + \"/\"\n if directory == \"/\":\n directory = \"\"\n file_name, file_extension = os.path.splitext(base_name)\n return file_name, file_extension, directory\n \n def get_blob_and_sas(self, blob_path):\n \"\"\" Function to retrieve the uri and sas token for a given blob in azure storage\"\"\"\n\n # Get path and file name minus the root container\n separator = \"/\"\n file_path_w_name_no_cont = separator.join(\n blob_path.split(separator)[1:])\n \n container_name = separator.join(\n blob_path.split(separator)[0:1])\n\n # Gen SAS token\n sas_token = generate_blob_sas(\n account_name=self.azure_blob_storage_account,\n container_name=container_name,\n blob_name=file_path_w_name_no_cont,\n account_key=self.azure_blob_storage_key,\n permission=BlobSasPermissions(read=True),\n expiry=datetime.utcnow() + timedelta(hours=1)\n )\n source_blob_path = f'{self.azure_blob_storage_endpoint}{blob_path}?{sas_token}'\n source_blob_path = source_blob_path.replace(\" \", \"%20\")\n logging.info(\"Path and SAS token for file in azure storage are now generated \\n\")\n return source_blob_path", "path": 
"functions/shared_code/utilities_helper.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport logging\nimport urllib.parse\nfrom datetime import datetime, timedelta\nfrom azure.storage.blob import generate_blob_sas, BlobSasPermissions\n\nclass UtilitiesHelper:\n \"\"\" Helper class for utility functions\"\"\"\n def __init__(self,\n azure_blob_storage_account,\n azure_blob_storage_endpoint,\n azure_blob_storage_key\n ):\n self.azure_blob_storage_account = azure_blob_storage_account\n self.azure_blob_storage_endpoint = azure_blob_storage_endpoint\n self.azure_blob_storage_key = azure_blob_storage_key\n \n def get_filename_and_extension(self, path):\n \"\"\" Function to return the file name & type\"\"\"\n # Split the path into base and extension\n base_name = os.path.basename(path)\n segments = path.split(\"/\")\n directory = \"/\".join(segments[1:-1]) + \"/\"\n if directory == \"/\":\n directory = \"\"\n file_name, file_extension = os.path.splitext(base_name)\n return file_name, file_extension, directory\n \n def get_blob_and_sas(self, blob_path):\n \"\"\" Function to retrieve the uri and sas token for a given blob in azure storage\"\"\"\n\n # Get path and file name minus the root container\n separator = \"/\"\n file_path_w_name_no_cont = separator.join(\n blob_path.split(separator)[1:])\n \n container_name = separator.join(\n blob_path.split(separator)[0:1])\n\n # Gen SAS token\n sas_token = generate_blob_sas(\n account_name=self.azure_blob_storage_account,\n container_name=container_name,\n blob_name=file_path_w_name_no_cont,\n account_key=self.azure_blob_storage_key,\n permission=BlobSasPermissions(read=True),\n expiry=datetime.utcnow() + timedelta(hours=1)\n )\n blob_path = urllib.parse.quote(blob_path)\n source_blob_path = f'{self.azure_blob_storage_endpoint}{blob_path}?{sas_token}'\n logging.info(\"Path and SAS token for file in azure storage are now generated \\n\")\n return source_blob_path", "path": "functions/shared_code/utilities_helper.py"}]}
| 975 | 200 |
gh_patches_debug_27874
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-7673
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Any way to filter on tags for Cognito identity-pool or user-pool?
### Discussed in https://github.com/orgs/cloud-custodian/discussions/7616
*Originally posted by **stepkirk**, August 5, 2022:*
We normally enforce tags on AWS resources by using Custodian to look for certain required tags on a resource and then, if the tags don't exist or aren't in the correct format, we mark the resource for deletion after a certain grace period. With the Cognito identity-pool and user-pool resources, it doesn't look like we can check for tags the normal way and it doesn't look like we can mark a resource for later deletion. Is that true or am I missing something?
Any plans to add tagging/marking support in the future for these Cognito resources?
--- END ISSUE ---
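For reference, the usual Cloud Custodian pattern for making tags visible to tag filters is to augment the describe results, roughly as in this sketch (illustrative only; it mirrors the approach of the patch shown further below in this entry):

```python
from c7n.query import DescribeSource
from c7n.tags import universal_augment


class DescribeIdentityPool(DescribeSource):
    """Describe source that attaches tags to each Cognito identity pool."""

    def augment(self, resources):
        # universal_augment fetches the resources' tags and merges them into
        # the resource dicts, which is what tag-based filters read
        return universal_augment(self.manager, resources)
```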
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/cognito.py`
Content:
```
1 # Copyright The Cloud Custodian Authors.
2 # SPDX-License-Identifier: Apache-2.0
3 from botocore.exceptions import ClientError
4
5 from c7n.actions import BaseAction
6 from c7n.manager import resources
7 from c7n.query import QueryResourceManager, TypeInfo
8 from c7n.utils import local_session, type_schema
9
10
11 @resources.register('identity-pool')
12 class CognitoIdentityPool(QueryResourceManager):
13
14 class resource_type(TypeInfo):
15 service = 'cognito-identity'
16 enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})
17 detail_spec = (
18 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)
19 id = 'IdentityPoolId'
20 name = 'IdentityPoolName'
21 arn_type = "identitypool"
22 cfn_type = 'AWS::Cognito::IdentityPool'
23
24
25 @CognitoIdentityPool.action_registry.register('delete')
26 class DeleteIdentityPool(BaseAction):
27 """Action to delete cognito identity pool
28
29 It is recommended to use a filter to avoid unwanted deletion of pools
30
31 :example:
32
33 .. code-block:: yaml
34
35 policies:
36 - name: identity-pool-delete
37 resource: identity-pool
38 actions:
39 - delete
40 """
41
42 schema = type_schema('delete')
43 permissions = ("cognito-identity:DeleteIdentityPool",)
44
45 def process(self, pools):
46 with self.executor_factory(max_workers=2) as w:
47 list(w.map(self.process_pool, pools))
48
49 def process_pool(self, pool):
50 client = local_session(
51 self.manager.session_factory).client('cognito-identity')
52 try:
53 client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId'])
54 except ClientError as e:
55 self.log.exception(
56 "Exception deleting identity pool:\n %s" % e)
57
58
59 @resources.register('user-pool')
60 class CognitoUserPool(QueryResourceManager):
61
62 class resource_type(TypeInfo):
63 service = "cognito-idp"
64 enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})
65 detail_spec = (
66 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')
67 id = 'Id'
68 name = 'Name'
69 arn_type = "userpool"
70 cfn_type = 'AWS::Cognito::UserPool'
71
72
73 @CognitoUserPool.action_registry.register('delete')
74 class DeleteUserPool(BaseAction):
75 """Action to delete cognito user pool
76
77 It is recommended to use a filter to avoid unwanted deletion of pools
78
79 :example:
80
81 .. code-block:: yaml
82
83 policies:
84 - name: user-pool-delete
85 resource: user-pool
86 actions:
87 - delete
88 """
89
90 schema = type_schema('delete')
91 permissions = ("cognito-idp:DeleteUserPool",)
92
93 def process(self, pools):
94 with self.executor_factory(max_workers=2) as w:
95 list(w.map(self.process_pool, pools))
96
97 def process_pool(self, pool):
98 client = local_session(
99 self.manager.session_factory).client('cognito-idp')
100 try:
101 client.delete_user_pool(UserPoolId=pool['Id'])
102 except ClientError as e:
103 self.log.exception(
104 "Exception deleting user pool:\n %s" % e)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py
--- a/c7n/resources/cognito.py
+++ b/c7n/resources/cognito.py
@@ -4,10 +4,21 @@
from c7n.actions import BaseAction
from c7n.manager import resources
-from c7n.query import QueryResourceManager, TypeInfo
+from c7n.query import QueryResourceManager, TypeInfo, DescribeSource
+from c7n.tags import universal_augment
from c7n.utils import local_session, type_schema
+class DescribeIdentityPool(DescribeSource):
+ def augment(self, resources):
+ return universal_augment(self.manager, resources)
+
+
+class DescribeUserPool(DescribeSource):
+ def augment(self, resources):
+ return universal_augment(self.manager, resources)
+
+
@resources.register('identity-pool')
class CognitoIdentityPool(QueryResourceManager):
@@ -20,6 +31,11 @@
name = 'IdentityPoolName'
arn_type = "identitypool"
cfn_type = 'AWS::Cognito::IdentityPool'
+ universal_taggable = object()
+
+ source_mapping = {
+ 'describe': DescribeIdentityPool,
+ }
@CognitoIdentityPool.action_registry.register('delete')
@@ -69,6 +85,10 @@
arn_type = "userpool"
cfn_type = 'AWS::Cognito::UserPool'
+ source_mapping = {
+ 'describe': DescribeUserPool,
+ }
+
@CognitoUserPool.action_registry.register('delete')
class DeleteUserPool(BaseAction):
|
{"golden_diff": "diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py\n--- a/c7n/resources/cognito.py\n+++ b/c7n/resources/cognito.py\n@@ -4,10 +4,21 @@\n \n from c7n.actions import BaseAction\n from c7n.manager import resources\n-from c7n.query import QueryResourceManager, TypeInfo\n+from c7n.query import QueryResourceManager, TypeInfo, DescribeSource\n+from c7n.tags import universal_augment\n from c7n.utils import local_session, type_schema\n \n \n+class DescribeIdentityPool(DescribeSource):\n+ def augment(self, resources):\n+ return universal_augment(self.manager, resources)\n+\n+\n+class DescribeUserPool(DescribeSource):\n+ def augment(self, resources):\n+ return universal_augment(self.manager, resources)\n+\n+\n @resources.register('identity-pool')\n class CognitoIdentityPool(QueryResourceManager):\n \n@@ -20,6 +31,11 @@\n name = 'IdentityPoolName'\n arn_type = \"identitypool\"\n cfn_type = 'AWS::Cognito::IdentityPool'\n+ universal_taggable = object()\n+\n+ source_mapping = {\n+ 'describe': DescribeIdentityPool,\n+ }\n \n \n @CognitoIdentityPool.action_registry.register('delete')\n@@ -69,6 +85,10 @@\n arn_type = \"userpool\"\n cfn_type = 'AWS::Cognito::UserPool'\n \n+ source_mapping = {\n+ 'describe': DescribeUserPool,\n+ }\n+\n \n @CognitoUserPool.action_registry.register('delete')\n class DeleteUserPool(BaseAction):\n", "issue": "Any way to filter on tags for Cognito identity-pool or user-pool?\n### Discussed in https://github.com/orgs/cloud-custodian/discussions/7616\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **stepkirk** August 5, 2022</sup>\r\nWe normally enforce tags on AWS resources by using Custodian to look for certain required tags on a resource and then, if the tags don't exist or aren't in the correct format, we mark the resource for deletion after a certain grace period. With the Cognito identity-pool and user-pool resources, it doesn't look like we can check for tags the normal way and it doesn't look like we can mark a resource for later deletion. Is that true or am I missing something?\r\n\r\nAny plans to add tagging/marking support in the future for these Cognito resources?</div>\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nfrom botocore.exceptions import ClientError\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\n\[email protected]('identity-pool')\nclass CognitoIdentityPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cognito-identity'\n enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n arn_type = \"identitypool\"\n cfn_type = 'AWS::Cognito::IdentityPool'\n\n\[email protected]_registry.register('delete')\nclass DeleteIdentityPool(BaseAction):\n \"\"\"Action to delete cognito identity pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: identity-pool-delete\n resource: identity-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-identity:DeleteIdentityPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-identity')\n try:\n client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting identity pool:\\n %s\" % e)\n\n\[email protected]('user-pool')\nclass CognitoUserPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"cognito-idp\"\n enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n name = 'Name'\n arn_type = \"userpool\"\n cfn_type = 'AWS::Cognito::UserPool'\n\n\[email protected]_registry.register('delete')\nclass DeleteUserPool(BaseAction):\n \"\"\"Action to delete cognito user pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: user-pool-delete\n resource: user-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-idp:DeleteUserPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-idp')\n try:\n client.delete_user_pool(UserPoolId=pool['Id'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting user pool:\\n %s\" % e)\n", "path": "c7n/resources/cognito.py"}], "after_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nfrom botocore.exceptions import ClientError\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo, DescribeSource\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\n\nclass DescribeIdentityPool(DescribeSource):\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\nclass DescribeUserPool(DescribeSource):\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\[email protected]('identity-pool')\nclass CognitoIdentityPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cognito-identity'\n enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n arn_type = \"identitypool\"\n cfn_type = 'AWS::Cognito::IdentityPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeIdentityPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteIdentityPool(BaseAction):\n \"\"\"Action to delete cognito identity pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: identity-pool-delete\n resource: identity-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-identity:DeleteIdentityPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-identity')\n try:\n client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting identity pool:\\n %s\" % e)\n\n\[email protected]('user-pool')\nclass CognitoUserPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"cognito-idp\"\n enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n name = 'Name'\n arn_type = \"userpool\"\n cfn_type = 'AWS::Cognito::UserPool'\n\n source_mapping = {\n 'describe': DescribeUserPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteUserPool(BaseAction):\n \"\"\"Action to delete cognito user pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: user-pool-delete\n resource: user-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-idp:DeleteUserPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-idp')\n try:\n client.delete_user_pool(UserPoolId=pool['Id'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting user pool:\\n %s\" % e)\n", "path": "c7n/resources/cognito.py"}]}
| 1,391 | 355 |
gh_patches_debug_19662
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1400
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix ConfluentKafkaInstrumentor usage
**Describe your environment**
Docker image running `python:3.10.7-slim-bullseye` as base
**Steps to reproduce**
```python3
from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor
# Instrument kafka
ConfluentKafkaInstrumentor().instrument()
```
**What is the expected behavior?**
Instrumentation to work just by adding the lines above.
**What is the actual behavior?**
> ModuleNotFoundError: No module named 'opentelemetry.instrumentation.confluentkafka'
**Additional context**
The solution is to actually make the import like this:
```python3
from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor
```
--- END ISSUE ---
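A minimal sketch of the working usage with the corrected module path (note the underscore in `confluent_kafka`):

```python
# The instrumentation installs as opentelemetry.instrumentation.confluent_kafka,
# so the import must use the underscored module name
from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor

# Instrument kafka
ConfluentKafkaInstrumentor().instrument()
```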
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Instrument `confluent-kafka-python` to report instrumentation-confluent-kafka produced and consumed messages
17
18 Usage
19 -----
20
21 ..code:: python
22
23 from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor
24 from confluent_kafka import Producer, Consumer
25
26 # Instrument kafka
27 ConfluentKafkaInstrumentor().instrument()
28
29 # report a span of type producer with the default settings
30 conf1 = {'bootstrap.servers': "localhost:9092"}
31 producer = Producer(conf1)
32 producer.produce('my-topic',b'raw_bytes')
33
34 conf2 = {'bootstrap.servers': "localhost:9092",
35 'group.id': "foo",
36 'auto.offset.reset': 'smallest'}
37 # report a span of type consumer with the default settings
38 consumer = Consumer(conf2)
39 def basic_consume_loop(consumer, topics):
40 try:
41 consumer.subscribe(topics)
42 running = True
43 while running:
44 msg = consumer.poll(timeout=1.0)
45 if msg is None: continue
46
47 if msg.error():
48 if msg.error().code() == KafkaError._PARTITION_EOF:
49 # End of partition event
50 sys.stderr.write(f"{msg.topic()} [{msg.partition()}] reached end at offset {msg.offset()}}\n")
51 elif msg.error():
52 raise KafkaException(msg.error())
53 else:
54 msg_process(msg)
55 finally:
56 # Close down consumer to commit final offsets.
57 consumer.close()
58
59 basic_consume_loop(consumer, "my-topic")
60
61
62 The `_instrument` method accepts the following keyword args:
63 tracer_provider (TracerProvider) - an optional tracer provider
64 instrument_producer (Callable) - a function with extra user-defined logic to be performed before sending the message
65 this function signature is:
66 def instrument_producer(producer: Producer, tracer_provider=None)
67 instrument_consumer (Callable) - a function with extra user-defined logic to be performed after consuming a message
68 this function signature is:
69 def instrument_consumer(consumer: Consumer, tracer_provider=None)
70 for example:
71 .. code: python
72 from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor
73 from confluent_kafka import Producer, Consumer
74
75 inst = ConfluentKafkaInstrumentor()
76
77 p = confluent_kafka.Producer({'bootstrap.servers': 'localhost:29092'})
78 c = confluent_kafka.Consumer({
79 'bootstrap.servers': 'localhost:29092',
80 'group.id': 'mygroup',
81 'auto.offset.reset': 'earliest'
82 })
83
84 # instrument confluent kafka with produce and consume hooks
85 p = inst.instrument_producer(p, tracer_provider)
86 c = inst.instrument_consumer(c, tracer_provider=tracer_provider)
87
88
89 # Using kafka as normal now will automatically generate spans,
90 # including user custom attributes added from the hooks
91 conf = {'bootstrap.servers': "localhost:9092"}
92 p.produce('my-topic',b'raw_bytes')
93 msg = c.poll()
94
95
96 API
97 ___
98 """
99 from typing import Collection
100
101 import confluent_kafka
102 import wrapt
103 from confluent_kafka import Consumer, Producer
104
105 from opentelemetry import context, propagate, trace
106 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
107 from opentelemetry.instrumentation.utils import unwrap
108 from opentelemetry.semconv.trace import MessagingOperationValues
109 from opentelemetry.trace import Link, SpanKind, Tracer
110
111 from .package import _instruments
112 from .utils import (
113 KafkaPropertiesExtractor,
114 _enrich_span,
115 _get_span_name,
116 _kafka_getter,
117 _kafka_setter,
118 )
119 from .version import __version__
120
121
122 class AutoInstrumentedProducer(Producer):
123
124 # This method is deliberately implemented in order to allow wrapt to wrap this function
125 def produce(
126 self, topic, value=None, *args, **kwargs
127 ): # pylint: disable=keyword-arg-before-vararg,useless-super-delegation
128 super().produce(topic, value, *args, **kwargs)
129
130
131 class AutoInstrumentedConsumer(Consumer):
132 def __init__(self, config):
133 super().__init__(config)
134 self._current_consume_span = None
135
136 # This method is deliberately implemented in order to allow wrapt to wrap this function
137 def poll(self, timeout=-1): # pylint: disable=useless-super-delegation
138 return super().poll(timeout)
139
140
141 class ProxiedProducer(Producer):
142 def __init__(self, producer: Producer, tracer: Tracer):
143 self._producer = producer
144 self._tracer = tracer
145
146 def flush(self, timeout=-1):
147 self._producer.flush(timeout)
148
149 def poll(self, timeout=-1):
150 self._producer.poll(timeout)
151
152 def produce(
153 self, topic, value=None, *args, **kwargs
154 ): # pylint: disable=keyword-arg-before-vararg
155 new_kwargs = kwargs.copy()
156 new_kwargs["topic"] = topic
157 new_kwargs["value"] = value
158
159 return ConfluentKafkaInstrumentor.wrap_produce(
160 self._producer.produce, self, self._tracer, args, new_kwargs
161 )
162
163 def original_producer(self):
164 return self._producer
165
166
167 class ProxiedConsumer(Consumer):
168 def __init__(self, consumer: Consumer, tracer: Tracer):
169 self._consumer = consumer
170 self._tracer = tracer
171 self._current_consume_span = None
172 self._current_context_token = None
173
174 def committed(self, partitions, timeout=-1):
175 return self._consumer.committed(partitions, timeout)
176
177 def consume(
178 self, num_messages=1, *args, **kwargs
179 ): # pylint: disable=keyword-arg-before-vararg
180 return self._consumer.consume(num_messages, *args, **kwargs)
181
182 def get_watermark_offsets(
183 self, partition, timeout=-1, *args, **kwargs
184 ): # pylint: disable=keyword-arg-before-vararg
185 return self._consumer.get_watermark_offsets(
186 partition, timeout, *args, **kwargs
187 )
188
189 def offsets_for_times(self, partitions, timeout=-1):
190 return self._consumer.offsets_for_times(partitions, timeout)
191
192 def poll(self, timeout=-1):
193 return ConfluentKafkaInstrumentor.wrap_poll(
194 self._consumer.poll, self, self._tracer, [timeout], {}
195 )
196
197 def subscribe(
198 self, topics, on_assign=lambda *args: None, *args, **kwargs
199 ): # pylint: disable=keyword-arg-before-vararg
200 self._consumer.subscribe(topics, on_assign, *args, **kwargs)
201
202 def original_consumer(self):
203 return self._consumer
204
205
206 class ConfluentKafkaInstrumentor(BaseInstrumentor):
207 """An instrumentor for confluent kafka module
208 See `BaseInstrumentor`
209 """
210
211 # pylint: disable=attribute-defined-outside-init
212 @staticmethod
213 def instrument_producer(
214 producer: Producer, tracer_provider=None
215 ) -> ProxiedProducer:
216 tracer = trace.get_tracer(
217 __name__, __version__, tracer_provider=tracer_provider
218 )
219
220 manual_producer = ProxiedProducer(producer, tracer)
221
222 return manual_producer
223
224 @staticmethod
225 def instrument_consumer(
226 consumer: Consumer, tracer_provider=None
227 ) -> ProxiedConsumer:
228 tracer = trace.get_tracer(
229 __name__, __version__, tracer_provider=tracer_provider
230 )
231
232 manual_consumer = ProxiedConsumer(consumer, tracer)
233
234 return manual_consumer
235
236 @staticmethod
237 def uninstrument_producer(producer: Producer) -> Producer:
238 if isinstance(producer, ProxiedProducer):
239 return producer.original_producer()
240 return producer
241
242 @staticmethod
243 def uninstrument_consumer(consumer: Consumer) -> Consumer:
244 if isinstance(consumer, ProxiedConsumer):
245 return consumer.original_consumer()
246 return consumer
247
248 def instrumentation_dependencies(self) -> Collection[str]:
249 return _instruments
250
251 def _instrument(self, **kwargs):
252 self._original_kafka_producer = confluent_kafka.Producer
253 self._original_kafka_consumer = confluent_kafka.Consumer
254
255 confluent_kafka.Producer = AutoInstrumentedProducer
256 confluent_kafka.Consumer = AutoInstrumentedConsumer
257
258 tracer_provider = kwargs.get("tracer_provider")
259 tracer = trace.get_tracer(
260 __name__, __version__, tracer_provider=tracer_provider
261 )
262
263 self._tracer = tracer
264
265 def _inner_wrap_produce(func, instance, args, kwargs):
266 return ConfluentKafkaInstrumentor.wrap_produce(
267 func, instance, self._tracer, args, kwargs
268 )
269
270 def _inner_wrap_poll(func, instance, args, kwargs):
271 return ConfluentKafkaInstrumentor.wrap_poll(
272 func, instance, self._tracer, args, kwargs
273 )
274
275 wrapt.wrap_function_wrapper(
276 AutoInstrumentedProducer,
277 "produce",
278 _inner_wrap_produce,
279 )
280
281 wrapt.wrap_function_wrapper(
282 AutoInstrumentedConsumer,
283 "poll",
284 _inner_wrap_poll,
285 )
286
287 def _uninstrument(self, **kwargs):
288 confluent_kafka.Producer = self._original_kafka_producer
289 confluent_kafka.Consumer = self._original_kafka_consumer
290
291 unwrap(AutoInstrumentedProducer, "produce")
292 unwrap(AutoInstrumentedConsumer, "poll")
293
294 @staticmethod
295 def wrap_produce(func, instance, tracer, args, kwargs):
296 topic = kwargs.get("topic")
297 if not topic:
298 topic = args[0]
299
300 span_name = _get_span_name("send", topic)
301 with tracer.start_as_current_span(
302 name=span_name, kind=trace.SpanKind.PRODUCER
303 ) as span:
304 headers = KafkaPropertiesExtractor.extract_produce_headers(
305 args, kwargs
306 )
307 if headers is None:
308 headers = []
309 kwargs["headers"] = headers
310
311 topic = KafkaPropertiesExtractor.extract_produce_topic(args)
312 _enrich_span(
313 span,
314 topic,
315 operation=MessagingOperationValues.RECEIVE,
316 ) # Replace
317 propagate.inject(
318 headers,
319 setter=_kafka_setter,
320 )
321 return func(*args, **kwargs)
322
323 @staticmethod
324 def wrap_poll(func, instance, tracer, args, kwargs):
325 if instance._current_consume_span:
326 context.detach(instance._current_context_token)
327 instance._current_context_token = None
328 instance._current_consume_span.end()
329 instance._current_consume_span = None
330
331 with tracer.start_as_current_span(
332 "recv", end_on_exit=True, kind=trace.SpanKind.CONSUMER
333 ):
334 record = func(*args, **kwargs)
335 if record:
336 links = []
337 ctx = propagate.extract(record.headers(), getter=_kafka_getter)
338 if ctx:
339 for item in ctx.values():
340 if hasattr(item, "get_span_context"):
341 links.append(Link(context=item.get_span_context()))
342
343 instance._current_consume_span = tracer.start_span(
344 name=f"{record.topic()} process",
345 links=links,
346 kind=SpanKind.CONSUMER,
347 )
348
349 _enrich_span(
350 instance._current_consume_span,
351 record.topic(),
352 record.partition(),
353 record.offset(),
354 operation=MessagingOperationValues.PROCESS,
355 )
356 instance._current_context_token = context.attach(
357 trace.set_span_in_context(instance._current_consume_span)
358 )
359
360 return record
361
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py
@@ -20,7 +20,7 @@
..code:: python
- from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor
+ from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor
from confluent_kafka import Producer, Consumer
# Instrument kafka
@@ -69,7 +69,7 @@
def instrument_consumer(consumer: Consumer, tracer_provider=None)
for example:
.. code: python
- from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor
+ from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor
from confluent_kafka import Producer, Consumer
inst = ConfluentKafkaInstrumentor()
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py\n@@ -20,7 +20,7 @@\n \n ..code:: python\n \n- from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor\n+ from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor\n from confluent_kafka import Producer, Consumer\n \n # Instrument kafka\n@@ -69,7 +69,7 @@\n def instrument_consumer(consumer: Consumer, tracer_provider=None)\n for example:\n .. code: python\n- from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor\n+ from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor\n from confluent_kafka import Producer, Consumer\n \n inst = ConfluentKafkaInstrumentor()\n", "issue": "Fix ConfluentKafkaInstrumentor usage\n**Describe your environment** \r\nDocker image running `python:3.10.7-slim-bullseye` as base\r\n\r\n**Steps to reproduce**\r\n```python3\r\nfrom opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor\r\n\r\n# Instrument kafka\r\nConfluentKafkaInstrumentor().instrument()\r\n```\r\n\r\n**What is the expected behavior?**\r\nInstrumentation to work just by adding the lines above.\r\n\r\n**What is the actual behavior?**\r\n\r\n> ModuleNotFoundError: No module named 'opentelemetry.instrumentation.confluentkafka'\r\n\r\n**Additional context**\r\nThe solution is to actually make the import like this:\r\n```python3\r\nfrom opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor\r\n```\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInstrument `confluent-kafka-python` to report instrumentation-confluent-kafka produced and consumed messages\n\nUsage\n-----\n\n..code:: python\n\n from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor\n from confluent_kafka import Producer, Consumer\n\n # Instrument kafka\n ConfluentKafkaInstrumentor().instrument()\n\n # report a span of type producer with the default settings\n conf1 = {'bootstrap.servers': \"localhost:9092\"}\n producer = Producer(conf1)\n producer.produce('my-topic',b'raw_bytes')\n\n conf2 = {'bootstrap.servers': \"localhost:9092\",\n 'group.id': \"foo\",\n 'auto.offset.reset': 'smallest'}\n # report a span of type consumer with the default settings\n consumer = Consumer(conf2)\n def basic_consume_loop(consumer, topics):\n try:\n consumer.subscribe(topics)\n running = True\n while running:\n msg = consumer.poll(timeout=1.0)\n if msg is None: 
continue\n\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n # End of partition event\n sys.stderr.write(f\"{msg.topic()} [{msg.partition()}] reached end at offset {msg.offset()}}\\n\")\n elif msg.error():\n raise KafkaException(msg.error())\n else:\n msg_process(msg)\n finally:\n # Close down consumer to commit final offsets.\n consumer.close()\n\n basic_consume_loop(consumer, \"my-topic\")\n\n\nThe `_instrument` method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\ninstrument_producer (Callable) - a function with extra user-defined logic to be performed before sending the message\n this function signature is:\n def instrument_producer(producer: Producer, tracer_provider=None)\ninstrument_consumer (Callable) - a function with extra user-defined logic to be performed after consuming a message\n this function signature is:\n def instrument_consumer(consumer: Consumer, tracer_provider=None)\nfor example:\n.. code: python\n from opentelemetry.instrumentation.confluentkafka import ConfluentKafkaInstrumentor\n from confluent_kafka import Producer, Consumer\n\n inst = ConfluentKafkaInstrumentor()\n\n p = confluent_kafka.Producer({'bootstrap.servers': 'localhost:29092'})\n c = confluent_kafka.Consumer({\n 'bootstrap.servers': 'localhost:29092',\n 'group.id': 'mygroup',\n 'auto.offset.reset': 'earliest'\n })\n\n # instrument confluent kafka with produce and consume hooks\n p = inst.instrument_producer(p, tracer_provider)\n c = inst.instrument_consumer(c, tracer_provider=tracer_provider)\n\n\n # Using kafka as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n conf = {'bootstrap.servers': \"localhost:9092\"}\n p.produce('my-topic',b'raw_bytes')\n msg = c.poll()\n\n\nAPI\n___\n\"\"\"\nfrom typing import Collection\n\nimport confluent_kafka\nimport wrapt\nfrom confluent_kafka import Consumer, Producer\n\nfrom opentelemetry import context, propagate, trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import MessagingOperationValues\nfrom opentelemetry.trace import Link, SpanKind, Tracer\n\nfrom .package import _instruments\nfrom .utils import (\n KafkaPropertiesExtractor,\n _enrich_span,\n _get_span_name,\n _kafka_getter,\n _kafka_setter,\n)\nfrom .version import __version__\n\n\nclass AutoInstrumentedProducer(Producer):\n\n # This method is deliberately implemented in order to allow wrapt to wrap this function\n def produce(\n self, topic, value=None, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg,useless-super-delegation\n super().produce(topic, value, *args, **kwargs)\n\n\nclass AutoInstrumentedConsumer(Consumer):\n def __init__(self, config):\n super().__init__(config)\n self._current_consume_span = None\n\n # This method is deliberately implemented in order to allow wrapt to wrap this function\n def poll(self, timeout=-1): # pylint: disable=useless-super-delegation\n return super().poll(timeout)\n\n\nclass ProxiedProducer(Producer):\n def __init__(self, producer: Producer, tracer: Tracer):\n self._producer = producer\n self._tracer = tracer\n\n def flush(self, timeout=-1):\n self._producer.flush(timeout)\n\n def poll(self, timeout=-1):\n self._producer.poll(timeout)\n\n def produce(\n self, topic, value=None, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n new_kwargs = kwargs.copy()\n new_kwargs[\"topic\"] = 
topic\n new_kwargs[\"value\"] = value\n\n return ConfluentKafkaInstrumentor.wrap_produce(\n self._producer.produce, self, self._tracer, args, new_kwargs\n )\n\n def original_producer(self):\n return self._producer\n\n\nclass ProxiedConsumer(Consumer):\n def __init__(self, consumer: Consumer, tracer: Tracer):\n self._consumer = consumer\n self._tracer = tracer\n self._current_consume_span = None\n self._current_context_token = None\n\n def committed(self, partitions, timeout=-1):\n return self._consumer.committed(partitions, timeout)\n\n def consume(\n self, num_messages=1, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n return self._consumer.consume(num_messages, *args, **kwargs)\n\n def get_watermark_offsets(\n self, partition, timeout=-1, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n return self._consumer.get_watermark_offsets(\n partition, timeout, *args, **kwargs\n )\n\n def offsets_for_times(self, partitions, timeout=-1):\n return self._consumer.offsets_for_times(partitions, timeout)\n\n def poll(self, timeout=-1):\n return ConfluentKafkaInstrumentor.wrap_poll(\n self._consumer.poll, self, self._tracer, [timeout], {}\n )\n\n def subscribe(\n self, topics, on_assign=lambda *args: None, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n self._consumer.subscribe(topics, on_assign, *args, **kwargs)\n\n def original_consumer(self):\n return self._consumer\n\n\nclass ConfluentKafkaInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for confluent kafka module\n See `BaseInstrumentor`\n \"\"\"\n\n # pylint: disable=attribute-defined-outside-init\n @staticmethod\n def instrument_producer(\n producer: Producer, tracer_provider=None\n ) -> ProxiedProducer:\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n manual_producer = ProxiedProducer(producer, tracer)\n\n return manual_producer\n\n @staticmethod\n def instrument_consumer(\n consumer: Consumer, tracer_provider=None\n ) -> ProxiedConsumer:\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n manual_consumer = ProxiedConsumer(consumer, tracer)\n\n return manual_consumer\n\n @staticmethod\n def uninstrument_producer(producer: Producer) -> Producer:\n if isinstance(producer, ProxiedProducer):\n return producer.original_producer()\n return producer\n\n @staticmethod\n def uninstrument_consumer(consumer: Consumer) -> Consumer:\n if isinstance(consumer, ProxiedConsumer):\n return consumer.original_consumer()\n return consumer\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_kafka_producer = confluent_kafka.Producer\n self._original_kafka_consumer = confluent_kafka.Consumer\n\n confluent_kafka.Producer = AutoInstrumentedProducer\n confluent_kafka.Consumer = AutoInstrumentedConsumer\n\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n self._tracer = tracer\n\n def _inner_wrap_produce(func, instance, args, kwargs):\n return ConfluentKafkaInstrumentor.wrap_produce(\n func, instance, self._tracer, args, kwargs\n )\n\n def _inner_wrap_poll(func, instance, args, kwargs):\n return ConfluentKafkaInstrumentor.wrap_poll(\n func, instance, self._tracer, args, kwargs\n )\n\n wrapt.wrap_function_wrapper(\n AutoInstrumentedProducer,\n \"produce\",\n _inner_wrap_produce,\n )\n\n wrapt.wrap_function_wrapper(\n AutoInstrumentedConsumer,\n 
\"poll\",\n _inner_wrap_poll,\n )\n\n def _uninstrument(self, **kwargs):\n confluent_kafka.Producer = self._original_kafka_producer\n confluent_kafka.Consumer = self._original_kafka_consumer\n\n unwrap(AutoInstrumentedProducer, \"produce\")\n unwrap(AutoInstrumentedConsumer, \"poll\")\n\n @staticmethod\n def wrap_produce(func, instance, tracer, args, kwargs):\n topic = kwargs.get(\"topic\")\n if not topic:\n topic = args[0]\n\n span_name = _get_span_name(\"send\", topic)\n with tracer.start_as_current_span(\n name=span_name, kind=trace.SpanKind.PRODUCER\n ) as span:\n headers = KafkaPropertiesExtractor.extract_produce_headers(\n args, kwargs\n )\n if headers is None:\n headers = []\n kwargs[\"headers\"] = headers\n\n topic = KafkaPropertiesExtractor.extract_produce_topic(args)\n _enrich_span(\n span,\n topic,\n operation=MessagingOperationValues.RECEIVE,\n ) # Replace\n propagate.inject(\n headers,\n setter=_kafka_setter,\n )\n return func(*args, **kwargs)\n\n @staticmethod\n def wrap_poll(func, instance, tracer, args, kwargs):\n if instance._current_consume_span:\n context.detach(instance._current_context_token)\n instance._current_context_token = None\n instance._current_consume_span.end()\n instance._current_consume_span = None\n\n with tracer.start_as_current_span(\n \"recv\", end_on_exit=True, kind=trace.SpanKind.CONSUMER\n ):\n record = func(*args, **kwargs)\n if record:\n links = []\n ctx = propagate.extract(record.headers(), getter=_kafka_getter)\n if ctx:\n for item in ctx.values():\n if hasattr(item, \"get_span_context\"):\n links.append(Link(context=item.get_span_context()))\n\n instance._current_consume_span = tracer.start_span(\n name=f\"{record.topic()} process\",\n links=links,\n kind=SpanKind.CONSUMER,\n )\n\n _enrich_span(\n instance._current_consume_span,\n record.topic(),\n record.partition(),\n record.offset(),\n operation=MessagingOperationValues.PROCESS,\n )\n instance._current_context_token = context.attach(\n trace.set_span_in_context(instance._current_consume_span)\n )\n\n return record\n", "path": "instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInstrument `confluent-kafka-python` to report instrumentation-confluent-kafka produced and consumed messages\n\nUsage\n-----\n\n..code:: python\n\n from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor\n from confluent_kafka import Producer, Consumer\n\n # Instrument kafka\n ConfluentKafkaInstrumentor().instrument()\n\n # report a span of type producer with the default settings\n conf1 = {'bootstrap.servers': \"localhost:9092\"}\n producer = Producer(conf1)\n producer.produce('my-topic',b'raw_bytes')\n\n conf2 = {'bootstrap.servers': \"localhost:9092\",\n 'group.id': \"foo\",\n 'auto.offset.reset': 'smallest'}\n # report a span of type consumer with the default settings\n consumer = 
Consumer(conf2)\n def basic_consume_loop(consumer, topics):\n try:\n consumer.subscribe(topics)\n running = True\n while running:\n msg = consumer.poll(timeout=1.0)\n if msg is None: continue\n\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n # End of partition event\n sys.stderr.write(f\"{msg.topic()} [{msg.partition()}] reached end at offset {msg.offset()}}\\n\")\n elif msg.error():\n raise KafkaException(msg.error())\n else:\n msg_process(msg)\n finally:\n # Close down consumer to commit final offsets.\n consumer.close()\n\n basic_consume_loop(consumer, \"my-topic\")\n\n\nThe `_instrument` method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\ninstrument_producer (Callable) - a function with extra user-defined logic to be performed before sending the message\n this function signature is:\n def instrument_producer(producer: Producer, tracer_provider=None)\ninstrument_consumer (Callable) - a function with extra user-defined logic to be performed after consuming a message\n this function signature is:\n def instrument_consumer(consumer: Consumer, tracer_provider=None)\nfor example:\n.. code: python\n from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor\n from confluent_kafka import Producer, Consumer\n\n inst = ConfluentKafkaInstrumentor()\n\n p = confluent_kafka.Producer({'bootstrap.servers': 'localhost:29092'})\n c = confluent_kafka.Consumer({\n 'bootstrap.servers': 'localhost:29092',\n 'group.id': 'mygroup',\n 'auto.offset.reset': 'earliest'\n })\n\n # instrument confluent kafka with produce and consume hooks\n p = inst.instrument_producer(p, tracer_provider)\n c = inst.instrument_consumer(c, tracer_provider=tracer_provider)\n\n\n # Using kafka as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n conf = {'bootstrap.servers': \"localhost:9092\"}\n p.produce('my-topic',b'raw_bytes')\n msg = c.poll()\n\n\nAPI\n___\n\"\"\"\nfrom typing import Collection\n\nimport confluent_kafka\nimport wrapt\nfrom confluent_kafka import Consumer, Producer\n\nfrom opentelemetry import context, propagate, trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import MessagingOperationValues\nfrom opentelemetry.trace import Link, SpanKind, Tracer\n\nfrom .package import _instruments\nfrom .utils import (\n KafkaPropertiesExtractor,\n _enrich_span,\n _get_span_name,\n _kafka_getter,\n _kafka_setter,\n)\nfrom .version import __version__\n\n\nclass AutoInstrumentedProducer(Producer):\n\n # This method is deliberately implemented in order to allow wrapt to wrap this function\n def produce(\n self, topic, value=None, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg,useless-super-delegation\n super().produce(topic, value, *args, **kwargs)\n\n\nclass AutoInstrumentedConsumer(Consumer):\n def __init__(self, config):\n super().__init__(config)\n self._current_consume_span = None\n\n # This method is deliberately implemented in order to allow wrapt to wrap this function\n def poll(self, timeout=-1): # pylint: disable=useless-super-delegation\n return super().poll(timeout)\n\n\nclass ProxiedProducer(Producer):\n def __init__(self, producer: Producer, tracer: Tracer):\n self._producer = producer\n self._tracer = tracer\n\n def flush(self, timeout=-1):\n self._producer.flush(timeout)\n\n def poll(self, timeout=-1):\n 
self._producer.poll(timeout)\n\n def produce(\n self, topic, value=None, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n new_kwargs = kwargs.copy()\n new_kwargs[\"topic\"] = topic\n new_kwargs[\"value\"] = value\n\n return ConfluentKafkaInstrumentor.wrap_produce(\n self._producer.produce, self, self._tracer, args, new_kwargs\n )\n\n def original_producer(self):\n return self._producer\n\n\nclass ProxiedConsumer(Consumer):\n def __init__(self, consumer: Consumer, tracer: Tracer):\n self._consumer = consumer\n self._tracer = tracer\n self._current_consume_span = None\n self._current_context_token = None\n\n def committed(self, partitions, timeout=-1):\n return self._consumer.committed(partitions, timeout)\n\n def consume(\n self, num_messages=1, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n return self._consumer.consume(num_messages, *args, **kwargs)\n\n def get_watermark_offsets(\n self, partition, timeout=-1, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n return self._consumer.get_watermark_offsets(\n partition, timeout, *args, **kwargs\n )\n\n def offsets_for_times(self, partitions, timeout=-1):\n return self._consumer.offsets_for_times(partitions, timeout)\n\n def poll(self, timeout=-1):\n return ConfluentKafkaInstrumentor.wrap_poll(\n self._consumer.poll, self, self._tracer, [timeout], {}\n )\n\n def subscribe(\n self, topics, on_assign=lambda *args: None, *args, **kwargs\n ): # pylint: disable=keyword-arg-before-vararg\n self._consumer.subscribe(topics, on_assign, *args, **kwargs)\n\n def original_consumer(self):\n return self._consumer\n\n\nclass ConfluentKafkaInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for confluent kafka module\n See `BaseInstrumentor`\n \"\"\"\n\n # pylint: disable=attribute-defined-outside-init\n @staticmethod\n def instrument_producer(\n producer: Producer, tracer_provider=None\n ) -> ProxiedProducer:\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n manual_producer = ProxiedProducer(producer, tracer)\n\n return manual_producer\n\n @staticmethod\n def instrument_consumer(\n consumer: Consumer, tracer_provider=None\n ) -> ProxiedConsumer:\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n manual_consumer = ProxiedConsumer(consumer, tracer)\n\n return manual_consumer\n\n @staticmethod\n def uninstrument_producer(producer: Producer) -> Producer:\n if isinstance(producer, ProxiedProducer):\n return producer.original_producer()\n return producer\n\n @staticmethod\n def uninstrument_consumer(consumer: Consumer) -> Consumer:\n if isinstance(consumer, ProxiedConsumer):\n return consumer.original_consumer()\n return consumer\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_kafka_producer = confluent_kafka.Producer\n self._original_kafka_consumer = confluent_kafka.Consumer\n\n confluent_kafka.Producer = AutoInstrumentedProducer\n confluent_kafka.Consumer = AutoInstrumentedConsumer\n\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n self._tracer = tracer\n\n def _inner_wrap_produce(func, instance, args, kwargs):\n return ConfluentKafkaInstrumentor.wrap_produce(\n func, instance, self._tracer, args, kwargs\n )\n\n def _inner_wrap_poll(func, instance, args, kwargs):\n return ConfluentKafkaInstrumentor.wrap_poll(\n func, instance, 
self._tracer, args, kwargs\n )\n\n wrapt.wrap_function_wrapper(\n AutoInstrumentedProducer,\n \"produce\",\n _inner_wrap_produce,\n )\n\n wrapt.wrap_function_wrapper(\n AutoInstrumentedConsumer,\n \"poll\",\n _inner_wrap_poll,\n )\n\n def _uninstrument(self, **kwargs):\n confluent_kafka.Producer = self._original_kafka_producer\n confluent_kafka.Consumer = self._original_kafka_consumer\n\n unwrap(AutoInstrumentedProducer, \"produce\")\n unwrap(AutoInstrumentedConsumer, \"poll\")\n\n @staticmethod\n def wrap_produce(func, instance, tracer, args, kwargs):\n topic = kwargs.get(\"topic\")\n if not topic:\n topic = args[0]\n\n span_name = _get_span_name(\"send\", topic)\n with tracer.start_as_current_span(\n name=span_name, kind=trace.SpanKind.PRODUCER\n ) as span:\n headers = KafkaPropertiesExtractor.extract_produce_headers(\n args, kwargs\n )\n if headers is None:\n headers = []\n kwargs[\"headers\"] = headers\n\n topic = KafkaPropertiesExtractor.extract_produce_topic(args)\n _enrich_span(\n span,\n topic,\n operation=MessagingOperationValues.RECEIVE,\n ) # Replace\n propagate.inject(\n headers,\n setter=_kafka_setter,\n )\n return func(*args, **kwargs)\n\n @staticmethod\n def wrap_poll(func, instance, tracer, args, kwargs):\n if instance._current_consume_span:\n context.detach(instance._current_context_token)\n instance._current_context_token = None\n instance._current_consume_span.end()\n instance._current_consume_span = None\n\n with tracer.start_as_current_span(\n \"recv\", end_on_exit=True, kind=trace.SpanKind.CONSUMER\n ):\n record = func(*args, **kwargs)\n if record:\n links = []\n ctx = propagate.extract(record.headers(), getter=_kafka_getter)\n if ctx:\n for item in ctx.values():\n if hasattr(item, \"get_span_context\"):\n links.append(Link(context=item.get_span_context()))\n\n instance._current_consume_span = tracer.start_span(\n name=f\"{record.topic()} process\",\n links=links,\n kind=SpanKind.CONSUMER,\n )\n\n _enrich_span(\n instance._current_consume_span,\n record.topic(),\n record.partition(),\n record.offset(),\n operation=MessagingOperationValues.PROCESS,\n )\n instance._current_context_token = context.attach(\n trace.set_span_in_context(instance._current_consume_span)\n )\n\n return record\n", "path": "instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py"}]}
| 4,095 | 309 |
gh_patches_debug_200 | rasdani/github-patches | git_diff | scrapy__scrapy-1566 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
signals docs are confusing
It seems it is not explained how to connect a callback to a signal anywhere in Scrapy docs.
http://doc.scrapy.org/en/latest/topics/signals.html tells:
> You can connect to signals (or send your own) through the [Signals API](http://doc.scrapy.org/en/latest/topics/api.html#topics-api-signals).
But if you follow this link you get docs for scrapy.signalmanager.SignalManager - that's fine, but it is not explained where to get a SignalManager instance from.
There is an example in Extension docs (http://doc.scrapy.org/en/latest/topics/extensions.html#sample-extension), but
a) this is just an example;
b) it is not explained that crawler.signals is a SignalManager instance;
c) this example is neither in Signals docs nor in SignalManager docs.
There is also a bit of information here: http://doc.scrapy.org/en/latest/topics/api.html#scrapy.crawler.Crawler.signals, but
a) it is not linked to neither from Signal docs nor from SignalManager, so you can't find it if you don't know about it already;
b) it is not explained that crawler.signals is the only way to access signals.
So in the end users may get some luck connecting signals if they start from Crawler docs, but almost no luck if they start from Signals docs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/misc.py`
Content:
```
1 """Helper functions which doesn't fit anywhere else"""
2 import re
3 import hashlib
4 from importlib import import_module
5 from pkgutil import iter_modules
6
7 import six
8 from w3lib.html import replace_entities
9
10 from scrapy.utils.python import flatten, to_unicode
11 from scrapy.item import BaseItem
12
13
14 _ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes
15
16
17 def arg_to_iter(arg):
18 """Convert an argument to an iterable. The argument can be a None, single
19 value, or an iterable.
20
21 Exception: if arg is a dict, [arg] will be returned
22 """
23 if arg is None:
24 return []
25 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
26 return arg
27 else:
28 return [arg]
29
30
31 def load_object(path):
32 """Load an object given its absolute object path, and return it.
33
34 object can be a class, function, variable or an instance.
35 path ie: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
36 """
37
38 try:
39 dot = path.rindex('.')
40 except ValueError:
41 raise ValueError("Error loading object '%s': not a full path" % path)
42
43 module, name = path[:dot], path[dot+1:]
44 mod = import_module(module)
45
46 try:
47 obj = getattr(mod, name)
48 except AttributeError:
49 raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
50
51 return obj
52
53
54 def walk_modules(path):
55 """Loads a module and all its submodules from the given module path and
56 returns them. If *any* module throws an exception while importing, that
57 exception is thrown back.
58
59 For example: walk_modules('scrapy.utils')
60 """
61
62 mods = []
63 mod = import_module(path)
64 mods.append(mod)
65 if hasattr(mod, '__path__'):
66 for _, subpath, ispkg in iter_modules(mod.__path__):
67 fullpath = path + '.' + subpath
68 if ispkg:
69 mods += walk_modules(fullpath)
70 else:
71 submod = import_module(fullpath)
72 mods.append(submod)
73 return mods
74
75
76 def extract_regex(regex, text, encoding='utf-8'):
77 """Extract a list of unicode strings from the given text/encoding using the following policies:
78
79 * if the regex contains a named group called "extract" that will be returned
80 * if the regex contains multiple numbered groups, all those will be returned (flattened)
81 * if the regex doesn't contain any group the entire regex matching is returned
82 """
83
84 if isinstance(regex, six.string_types):
85 regex = re.compile(regex, re.UNICODE)
86
87 try:
88 strings = [regex.search(text).group('extract')] # named group
89 except:
90 strings = regex.findall(text) # full regex or numbered groups
91 strings = flatten(strings)
92
93 if isinstance(text, six.text_type):
94 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
95 else:
96 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
97 for s in strings]
98
99
100 def md5sum(file):
101 """Calculate the md5 checksum of a file-like object without reading its
102 whole content in memory.
103
104 >>> from io import BytesIO
105 >>> md5sum(BytesIO(b'file content to hash'))
106 '784406af91dd5a54fbb9c84c2236595a'
107 """
108 m = hashlib.md5()
109 while True:
110 d = file.read(8096)
111 if not d:
112 break
113 m.update(d)
114 return m.hexdigest()
115
116 def rel_has_nofollow(rel):
117 """Return True if link rel attribute has nofollow type"""
118 return True if rel is not None and 'nofollow' in rel.split() else False
119
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -1,4 +1,4 @@
-"""Helper functions which doesn't fit anywhere else"""
+"""Helper functions which don't fit anywhere else"""
import re
import hashlib
from importlib import import_module
|
{"golden_diff": "diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -1,4 +1,4 @@\n-\"\"\"Helper functions which doesn't fit anywhere else\"\"\"\n+\"\"\"Helper functions which don't fit anywhere else\"\"\"\n import re\n import hashlib\n from importlib import import_module\n", "issue": "signals docs are confusing\nIt seems it is not explained how to connect a callback to a singnal anywhere in Scrapy docs.\n\nhttp://doc.scrapy.org/en/latest/topics/signals.html tells:\n\n> You can connect to signals (or send your own) through the [Signals API](http://doc.scrapy.org/en/latest/topics/api.html#topics-api-signals).\n\nBut if you follow this link you get docs for scrapy.signalmanager.SignalManager - that's fine, but it is not explained where to get a SignalManager instance from.\n\nThere is an example in Extension docs (http://doc.scrapy.org/en/latest/topics/extensions.html#sample-extension), but\n\na) this is just an example;\nb) it is not explained that crawler.signals is a SignalManager instance;\nc) this example is neither in Signals docs nor in SignalManager docs.\n\nThere is also a bit of information here: http://doc.scrapy.org/en/latest/topics/api.html#scrapy.crawler.Crawler.signals, but\n\na) it is not linked to neither from Signal docs nor from SignalManager, so you can't find it if you don't know about it already;\nb) it is not explained that crawler.signals is the only way to access signals.\n\nSo in the end users may get some luck connecting signals if they start from Crawler docs, but almost no luck if they start from Signals docs.\n\n", "before_files": [{"content": "\"\"\"Helper functions which doesn't fit anywhere else\"\"\"\nimport re\nimport hashlib\nfrom importlib import import_module\nfrom pkgutil import iter_modules\n\nimport six\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be a class, function, variable or an instance.\n path ie: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' 
+ subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, six.string_types):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, six.text_type):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return True if rel is not None and 'nofollow' in rel.split() else False\n \n", "path": "scrapy/utils/misc.py"}], "after_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport re\nimport hashlib\nfrom importlib import import_module\nfrom pkgutil import iter_modules\n\nimport six\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be a class, function, variable o instance.\n path ie: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from a the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' 
+ subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, six.string_types):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, six.text_type):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return True if rel is not None and 'nofollow' in rel.split() else False\n \n", "path": "scrapy/utils/misc.py"}]}
| 1,652 | 77 |
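For context on the row above: the missing piece the issue describes is that `crawler.signals` is the `SignalManager`, and callbacks are registered on it from `from_crawler`. A minimal sketch of that wiring, with illustrative class and method names:
```python
from scrapy import signals


class SpiderOpenedLogger:
    """Minimal extension wiring a callback to a Scrapy signal."""

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        # crawler.signals is the SignalManager instance the issue says is
        # hard to discover; connect() registers the callback for a signal.
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        return ext

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s", spider.name)
```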
gh_patches_debug_30112 | rasdani/github-patches | git_diff | sonic-net__sonic-mgmt-4489 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Investigate backend T0 nightly failure during restart ptf
Latest run failed with the following error.
TASK [vm_set : Setup vlan port for vlan tunnel] ********************************
Wednesday 13 October 2021 12:17:07 +0000 (0:00:00.098) 0:00:22.391 *****
fatal: [STR2-ACS-SERV-18]: FAILED! => {"changed": false, "module_stderr": "Shared connection to 10.64.246.97 closed.\r\n", "module_stdout": "Traceback (most recent call last):\r\n File \"/home/azure/.ansible/tmp/ansible-tmp-1634127427.08-60041-68522534642110/AnsiballZ_vlan_port.py\", line 114, in <module>\r\n _ansiballz_main()\r\n File \"/home/azure/.ansible/tmp/ansible-tmp-1634127427.08-60041-68522534642110/AnsiballZ_vlan_port.py\", line 106, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \"/home/azure/.ansible/tmp/ansible-tmp-1634127427.08-60041-68522534642110/AnsiballZ_vlan_port.py\", line 49, in invoke_module\r\n imp.load_module('__main__', mod, module, MOD_DESC)\r\n File \"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\", line 151, in <module>\r\n File \"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\", line 139, in main\r\n File \"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\", line 65, in create_vlan_ports\r\n File \"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\", line 50, in create_vlan_port\r\n File \"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\", line 113, in cmd\r\nException: ret_code=3, error message=ERROR: trying to add VLAN #2020 to IF -:enp59s0f1:- error: File exists\r\n. cmd=vconfig add enp59s0f1 2020\r\n", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1}
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ansible/roles/vm_set/library/vlan_port.py`
Content:
```
1 #!/usr/bin/python
2
3 import re
4 import sys
5 import time
6 import subprocess
7 from pprint import pprint
8 from ansible.module_utils.basic import *
9
10 DOCUMENTATION = '''
11 module: vlan_port
12 version_added: "0.1"
13 author: Guohan Lu ([email protected])
14 short_description: Get/Create/Remove vlan tunnel port in the test server for physical DUT
15 '''
16
17 EXAMPLES = '''
18 - name: Set front panel port for vlan tunnel
19 vlan_port:
20 external_port: "{{ external_port }}"
21 vlan_ids: "{{ device_vlan_list }}"
22 cmd: "list"
23 '''
24
25 DOCUMENTATION = '''
26 - external_port: external port
27 - vlan_ids: vlan list
28 '''
29
30 CMD_DEBUG_FNAME = '/tmp/vlan_port.cmds.txt'
31 EXCEPTION_DEBUG_FNAME = '/tmp/vlan_port.exception.txt'
32
33 class VlanPort(object):
34 def __init__(self, external_port, vlan_ids):
35 self.external_port = external_port
36 self.vlan_ids = vlan_ids
37 self.host_ifaces = VlanPort.ifconfig('ifconfig -a')
38
39 return
40
41 def up_external_port(self):
42 if self.external_port in self.host_ifaces:
43 VlanPort.iface_up(self.external_port)
44
45 return
46
47 def create_vlan_port(self, port, vlan_id):
48 vlan_port = "%s.%d" % (port, vlan_id)
49 if vlan_port not in self.host_ifaces:
50 VlanPort.cmd('vconfig add %s %d' % (port, vlan_id))
51
52 VlanPort.iface_up(vlan_port)
53
54 return
55
56 def destroy_vlan_port(self, vlan_port):
57 if vlan_port in self.host_ifaces:
58 VlanPort.iface_down(vlan_port)
59 VlanPort.cmd('vconfig rem %s' % vlan_port)
60
61 return
62
63 def create_vlan_ports(self):
64 for vlan_id in self.vlan_ids.values():
65 self.create_vlan_port(self.external_port, vlan_id)
66
67 def remove_vlan_ports(self):
68 for vlan_id in self.vlan_ids.values():
69 vlan_port = "%s.%d" % (self.external_port, vlan_id)
70 self.destroy_vlan_port(vlan_port)
71
72 @staticmethod
73 def ifconfig(cmdline):
74 out = VlanPort.cmd(cmdline)
75
76 ifaces = set()
77
78 rows = out.split('\n')
79 for row in rows:
80 if len(row) == 0:
81 continue
82 terms = row.split()
83 if not row[0].isspace():
84 ifaces.add(terms[0].rstrip(':'))
85
86 return ifaces
87
88 @staticmethod
89 def iface_up(iface_name, pid=None):
90 return VlanPort.iface_updown(iface_name, 'up', pid)
91
92 @staticmethod
93 def iface_down(iface_name, pid=None):
94 return VlanPort.iface_updown(iface_name, 'down', pid)
95
96 @staticmethod
97 def iface_updown(iface_name, state, pid):
98 if pid is None:
99 return VlanPort.cmd('ip link set %s %s' % (iface_name, state))
100 else:
101 return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state))
102
103 @staticmethod
104 def cmd(cmdline):
105 with open(CMD_DEBUG_FNAME, 'a') as fp:
106 pprint("CMD: %s" % cmdline, fp)
107 cmd = cmdline.split(' ')
108 process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
109 stdout, stderr = process.communicate()
110 ret_code = process.returncode
111
112 if ret_code != 0:
113 raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline))
114
115 with open(CMD_DEBUG_FNAME, 'a') as fp:
116 pprint("OUTPUT: %s" % stdout, fp)
117
118 return stdout.decode('utf-8')
119
120
121 def main():
122
123 module = AnsibleModule(argument_spec=dict(
124 cmd=dict(required=True, choices=['create', 'remove', 'list']),
125 external_port=dict(required=True, type='str'),
126 vlan_ids=dict(required=True, type='dict'),
127 ))
128
129 cmd = module.params['cmd']
130 external_port = module.params['external_port']
131 vlan_ids = module.params['vlan_ids']
132
133 fp_ports = {}
134
135 vp = VlanPort(external_port, vlan_ids)
136
137 vp.up_external_port()
138 if cmd == "create":
139 vp.create_vlan_ports()
140 elif cmd == "remove":
141 vp.remove_vlan_ports()
142
143 fp_port_templ = external_port + ".%s"
144 for a_port_index, vid in vlan_ids.items():
145 fp_ports[a_port_index] = fp_port_templ % vid
146
147 module.exit_json(changed=False, ansible_facts={'dut_fp_ports': fp_ports})
148
149
150 if __name__ == "__main__":
151 main()
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ansible/roles/vm_set/library/vlan_port.py b/ansible/roles/vm_set/library/vlan_port.py
--- a/ansible/roles/vm_set/library/vlan_port.py
+++ b/ansible/roles/vm_set/library/vlan_port.py
@@ -47,6 +47,7 @@
def create_vlan_port(self, port, vlan_id):
vlan_port = "%s.%d" % (port, vlan_id)
if vlan_port not in self.host_ifaces:
+ VlanPort.cmd('vconfig rem %s' % vlan_port, True)
VlanPort.cmd('vconfig add %s %d' % (port, vlan_id))
VlanPort.iface_up(vlan_port)
@@ -101,7 +102,7 @@
return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state))
@staticmethod
- def cmd(cmdline):
+ def cmd(cmdline, ignore_error=False):
with open(CMD_DEBUG_FNAME, 'a') as fp:
pprint("CMD: %s" % cmdline, fp)
cmd = cmdline.split(' ')
@@ -109,11 +110,14 @@
stdout, stderr = process.communicate()
ret_code = process.returncode
- if ret_code != 0:
+ if ret_code != 0 and not ignore_error:
raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline))
with open(CMD_DEBUG_FNAME, 'a') as fp:
- pprint("OUTPUT: %s" % stdout, fp)
+ if ret_code == 0:
+ pprint("OUTPUT: %s" % stdout, fp)
+ else:
+ pprint("ERR: %s" % stderr, fp)
return stdout.decode('utf-8')
|
{"golden_diff": "diff --git a/ansible/roles/vm_set/library/vlan_port.py b/ansible/roles/vm_set/library/vlan_port.py\n--- a/ansible/roles/vm_set/library/vlan_port.py\n+++ b/ansible/roles/vm_set/library/vlan_port.py\n@@ -47,6 +47,7 @@\n def create_vlan_port(self, port, vlan_id):\n vlan_port = \"%s.%d\" % (port, vlan_id)\n if vlan_port not in self.host_ifaces:\n+ VlanPort.cmd('vconfig rem %s' % vlan_port, True)\n VlanPort.cmd('vconfig add %s %d' % (port, vlan_id))\n \n VlanPort.iface_up(vlan_port)\n@@ -101,7 +102,7 @@\n return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state))\n \n @staticmethod\n- def cmd(cmdline):\n+ def cmd(cmdline, ignore_error=False):\n with open(CMD_DEBUG_FNAME, 'a') as fp:\n pprint(\"CMD: %s\" % cmdline, fp)\n cmd = cmdline.split(' ')\n@@ -109,11 +110,14 @@\n stdout, stderr = process.communicate()\n ret_code = process.returncode\n \n- if ret_code != 0:\n+ if ret_code != 0 and not ignore_error:\n raise Exception(\"ret_code=%d, error message=%s. cmd=%s\" % (ret_code, stderr, cmdline))\n \n with open(CMD_DEBUG_FNAME, 'a') as fp:\n- pprint(\"OUTPUT: %s\" % stdout, fp)\n+ if ret_code == 0:\n+ pprint(\"OUTPUT: %s\" % stdout, fp)\n+ else:\n+ pprint(\"ERR: %s\" % stderr, fp)\n \n return stdout.decode('utf-8')\n", "issue": "Investigate backend T0 nighty failure during restart ptf\nLatest run failed with the following error. \r\n\r\nTASK [vm_set : Setup vlan port for vlan tunnel] ********************************\r\nWednesday 13 October 2021 12:17:07 +0000 (0:00:00.098) 0:00:22.391 ***** \r\nfatal: [STR2-ACS-SERV-18]: FAILED! => {\"changed\": false, \"module_stderr\": \"Shared connection to 10.64.246.97 closed.\\r\\n\", \"module_stdout\": \"Traceback (most recent call last):\\r\\n File \\\"/home/azure/.ansible/tmp/ansible-tmp-1634127427.08-60041-68522534642110/AnsiballZ_vlan_port.py\\\", line 114, in <module>\\r\\n _ansiballz_main()\\r\\n File \\\"/home/azure/.ansible/tmp/ansible-tmp-1634127427.08-60041-68522534642110/AnsiballZ_vlan_port.py\\\", line 106, in _ansiballz_main\\r\\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\\r\\n File \\\"/home/azure/.ansible/tmp/ansible-tmp-1634127427.08-60041-68522534642110/AnsiballZ_vlan_port.py\\\", line 49, in invoke_module\\r\\n imp.load_module('__main__', mod, module, MOD_DESC)\\r\\n File \\\"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\\\", line 151, in <module>\\r\\n File \\\"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\\\", line 139, in main\\r\\n File \\\"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\\\", line 65, in create_vlan_ports\\r\\n File \\\"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\\\", line 50, in create_vlan_port\\r\\n File \\\"/tmp/ansible_vlan_port_payload_apPh5r/__main__.py\\\", line 113, in cmd\\r\\nException: ret_code=3, error message=ERROR: trying to add VLAN #2020 to IF -:enp59s0f1:- error: File exists\\r\\n. 
cmd=vconfig add enp59s0f1 2020\\r\\n\", \"msg\": \"MODULE FAILURE\\nSee stdout/stderr for the exact error\", \"rc\": 1}\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n\nimport re\nimport sys\nimport time\nimport subprocess\nfrom pprint import pprint\nfrom ansible.module_utils.basic import *\n\nDOCUMENTATION = '''\nmodule: vlan_port\nversion_added: \"0.1\"\nauthor: Guohan Lu ([email protected])\nshort_description: Get/Create/Remove vlan tunnel port in the test server for physical DUT\n'''\n\nEXAMPLES = '''\n- name: Set front panel port for vlan tunnel\n vlan_port:\n external_port: \"{{ external_port }}\"\n vlan_ids: \"{{ device_vlan_list }}\"\n cmd: \"list\"\n'''\n\nDOCUMENTATION = '''\n - external_port: external port\n - vlan_ids: vlan list\n'''\n\nCMD_DEBUG_FNAME = '/tmp/vlan_port.cmds.txt'\nEXCEPTION_DEBUG_FNAME = '/tmp/vlan_port.exception.txt'\n\nclass VlanPort(object):\n def __init__(self, external_port, vlan_ids):\n self.external_port = external_port\n self.vlan_ids = vlan_ids\n self.host_ifaces = VlanPort.ifconfig('ifconfig -a')\n\n return\n\n def up_external_port(self):\n if self.external_port in self.host_ifaces:\n VlanPort.iface_up(self.external_port)\n\n return\n\n def create_vlan_port(self, port, vlan_id):\n vlan_port = \"%s.%d\" % (port, vlan_id)\n if vlan_port not in self.host_ifaces:\n VlanPort.cmd('vconfig add %s %d' % (port, vlan_id))\n\n VlanPort.iface_up(vlan_port)\n\n return\n\n def destroy_vlan_port(self, vlan_port):\n if vlan_port in self.host_ifaces:\n VlanPort.iface_down(vlan_port)\n VlanPort.cmd('vconfig rem %s' % vlan_port)\n\n return\n\n def create_vlan_ports(self):\n for vlan_id in self.vlan_ids.values():\n self.create_vlan_port(self.external_port, vlan_id)\n\n def remove_vlan_ports(self):\n for vlan_id in self.vlan_ids.values():\n vlan_port = \"%s.%d\" % (self.external_port, vlan_id)\n self.destroy_vlan_port(vlan_port)\n\n @staticmethod\n def ifconfig(cmdline):\n out = VlanPort.cmd(cmdline)\n\n ifaces = set()\n\n rows = out.split('\\n')\n for row in rows:\n if len(row) == 0:\n continue\n terms = row.split()\n if not row[0].isspace():\n ifaces.add(terms[0].rstrip(':'))\n\n return ifaces\n\n @staticmethod\n def iface_up(iface_name, pid=None):\n return VlanPort.iface_updown(iface_name, 'up', pid)\n\n @staticmethod\n def iface_down(iface_name, pid=None):\n return VlanPort.iface_updown(iface_name, 'down', pid)\n\n @staticmethod\n def iface_updown(iface_name, state, pid):\n if pid is None:\n return VlanPort.cmd('ip link set %s %s' % (iface_name, state))\n else:\n return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state))\n\n @staticmethod\n def cmd(cmdline):\n with open(CMD_DEBUG_FNAME, 'a') as fp:\n pprint(\"CMD: %s\" % cmdline, fp)\n cmd = cmdline.split(' ')\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n ret_code = process.returncode\n\n if ret_code != 0:\n raise Exception(\"ret_code=%d, error message=%s. 
cmd=%s\" % (ret_code, stderr, cmdline))\n\n with open(CMD_DEBUG_FNAME, 'a') as fp:\n pprint(\"OUTPUT: %s\" % stdout, fp)\n\n return stdout.decode('utf-8')\n\n\ndef main():\n\n module = AnsibleModule(argument_spec=dict(\n cmd=dict(required=True, choices=['create', 'remove', 'list']),\n external_port=dict(required=True, type='str'),\n vlan_ids=dict(required=True, type='dict'),\n ))\n\n cmd = module.params['cmd']\n external_port = module.params['external_port']\n vlan_ids = module.params['vlan_ids']\n\n fp_ports = {}\n\n vp = VlanPort(external_port, vlan_ids)\n\n vp.up_external_port()\n if cmd == \"create\":\n vp.create_vlan_ports()\n elif cmd == \"remove\":\n vp.remove_vlan_ports()\n\n fp_port_templ = external_port + \".%s\"\n for a_port_index, vid in vlan_ids.items():\n fp_ports[a_port_index] = fp_port_templ % vid\n\n module.exit_json(changed=False, ansible_facts={'dut_fp_ports': fp_ports})\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "ansible/roles/vm_set/library/vlan_port.py"}], "after_files": [{"content": "#!/usr/bin/python\n\nimport re\nimport sys\nimport time\nimport subprocess\nfrom pprint import pprint\nfrom ansible.module_utils.basic import *\n\nDOCUMENTATION = '''\nmodule: vlan_port\nversion_added: \"0.1\"\nauthor: Guohan Lu ([email protected])\nshort_description: Get/Create/Remove vlan tunnel port in the test server for physical DUT\n'''\n\nEXAMPLES = '''\n- name: Set front panel port for vlan tunnel\n vlan_port:\n external_port: \"{{ external_port }}\"\n vlan_ids: \"{{ device_vlan_list }}\"\n cmd: \"list\"\n'''\n\nDOCUMENTATION = '''\n - external_port: external port\n - vlan_ids: vlan list\n'''\n\nCMD_DEBUG_FNAME = '/tmp/vlan_port.cmds.txt'\nEXCEPTION_DEBUG_FNAME = '/tmp/vlan_port.exception.txt'\n\nclass VlanPort(object):\n def __init__(self, external_port, vlan_ids):\n self.external_port = external_port\n self.vlan_ids = vlan_ids\n self.host_ifaces = VlanPort.ifconfig('ifconfig -a')\n\n return\n\n def up_external_port(self):\n if self.external_port in self.host_ifaces:\n VlanPort.iface_up(self.external_port)\n\n return\n\n def create_vlan_port(self, port, vlan_id):\n vlan_port = \"%s.%d\" % (port, vlan_id)\n if vlan_port not in self.host_ifaces:\n VlanPort.cmd('vconfig rem %s' % vlan_port, True)\n VlanPort.cmd('vconfig add %s %d' % (port, vlan_id))\n\n VlanPort.iface_up(vlan_port)\n\n return\n\n def destroy_vlan_port(self, vlan_port):\n if vlan_port in self.host_ifaces:\n VlanPort.iface_down(vlan_port)\n VlanPort.cmd('vconfig rem %s' % vlan_port)\n\n return\n\n def create_vlan_ports(self):\n for vlan_id in self.vlan_ids.values():\n self.create_vlan_port(self.external_port, vlan_id)\n\n def remove_vlan_ports(self):\n for vlan_id in self.vlan_ids.values():\n vlan_port = \"%s.%d\" % (self.external_port, vlan_id)\n self.destroy_vlan_port(vlan_port)\n\n @staticmethod\n def ifconfig(cmdline):\n out = VlanPort.cmd(cmdline)\n\n ifaces = set()\n\n rows = out.split('\\n')\n for row in rows:\n if len(row) == 0:\n continue\n terms = row.split()\n if not row[0].isspace():\n ifaces.add(terms[0].rstrip(':'))\n\n return ifaces\n\n @staticmethod\n def iface_up(iface_name, pid=None):\n return VlanPort.iface_updown(iface_name, 'up', pid)\n\n @staticmethod\n def iface_down(iface_name, pid=None):\n return VlanPort.iface_updown(iface_name, 'down', pid)\n\n @staticmethod\n def iface_updown(iface_name, state, pid):\n if pid is None:\n return VlanPort.cmd('ip link set %s %s' % (iface_name, state))\n else:\n return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, 
state))\n\n @staticmethod\n def cmd(cmdline, ignore_error=False):\n with open(CMD_DEBUG_FNAME, 'a') as fp:\n pprint(\"CMD: %s\" % cmdline, fp)\n cmd = cmdline.split(' ')\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n ret_code = process.returncode\n\n if ret_code != 0 and not ignore_error:\n raise Exception(\"ret_code=%d, error message=%s. cmd=%s\" % (ret_code, stderr, cmdline))\n\n with open(CMD_DEBUG_FNAME, 'a') as fp:\n if ret_code == 0:\n pprint(\"OUTPUT: %s\" % stdout, fp)\n else:\n pprint(\"ERR: %s\" % stderr, fp)\n\n return stdout.decode('utf-8')\n\n\ndef main():\n\n module = AnsibleModule(argument_spec=dict(\n cmd=dict(required=True, choices=['create', 'remove', 'list']),\n external_port=dict(required=True, type='str'),\n vlan_ids=dict(required=True, type='dict'),\n ))\n\n cmd = module.params['cmd']\n external_port = module.params['external_port']\n vlan_ids = module.params['vlan_ids']\n\n fp_ports = {}\n\n vp = VlanPort(external_port, vlan_ids)\n\n vp.up_external_port()\n if cmd == \"create\":\n vp.create_vlan_ports()\n elif cmd == \"remove\":\n vp.remove_vlan_ports()\n\n fp_port_templ = external_port + \".%s\"\n for a_port_index, vid in vlan_ids.items():\n fp_ports[a_port_index] = fp_port_templ % vid\n\n module.exit_json(changed=False, ansible_facts={'dut_fp_ports': fp_ports})\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "ansible/roles/vm_set/library/vlan_port.py"}]}
| 2,337 | 430 |
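The golden diff above makes the `vconfig add` call idempotent by first attempting a removal and swallowing its failure. A condensed sketch of that same pattern outside the Ansible module (the `run` and `ensure_vlan_port` names here are illustrative, not from the repository):
```python
import subprocess


def run(cmdline, ignore_error=False):
    """Run a command; tolerate a non-zero exit code only when asked to."""
    proc = subprocess.Popen(cmdline.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0 and not ignore_error:
        raise RuntimeError("ret_code=%d, error message=%s. cmd=%s" % (proc.returncode, stderr, cmdline))
    return stdout.decode("utf-8")


def ensure_vlan_port(port, vlan_id):
    # A leftover sub-interface from a previous run would make `vconfig add`
    # fail with "error: File exists", so remove it first and ignore the error
    # raised when it does not exist yet.
    run("vconfig rem %s.%d" % (port, vlan_id), ignore_error=True)
    run("vconfig add %s %d" % (port, vlan_id))
```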
gh_patches_debug_29045 | rasdani/github-patches | git_diff | litestar-org__litestar-2864 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: OpenAPI schema generation fails due to same operation IDs
### Description
If two routes with the same path, but different methods are defined then the OpenAPI generation fails due to both of them having the same value for operation ID. After running `git bisect`, #2805 seems to have introduced this.
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import get, post
from litestar.app import Litestar
from litestar.testing import create_test_client
@post("/")
async def post_handler() -> None:
...
@get("/")
async def get_handler() -> None:
...
with create_test_client([post_handler, get_handler]) as client:
response = client.get("/schema/openapi.json")
assert response.status_code == 200
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
HEAD
### Platform
- [ ] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2863">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2863/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2863/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/_openapi/plugin.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from litestar._openapi.datastructures import OpenAPIContext
6 from litestar._openapi.path_item import create_path_item_for_route
7 from litestar.exceptions import ImproperlyConfiguredException
8 from litestar.plugins import InitPluginProtocol
9 from litestar.plugins.base import ReceiveRoutePlugin
10 from litestar.routes import HTTPRoute
11
12 if TYPE_CHECKING:
13 from litestar.app import Litestar
14 from litestar.config.app import AppConfig
15 from litestar.openapi.config import OpenAPIConfig
16 from litestar.openapi.spec import OpenAPI
17 from litestar.routes import BaseRoute
18
19
20 class OpenAPIPlugin(InitPluginProtocol, ReceiveRoutePlugin):
21 __slots__ = (
22 "app",
23 "included_routes",
24 "_openapi_config",
25 "_openapi_schema",
26 )
27
28 def __init__(self, app: Litestar) -> None:
29 self.app = app
30 self.included_routes: list[HTTPRoute] = []
31 self._openapi_config: OpenAPIConfig | None = None
32 self._openapi_schema: OpenAPI | None = None
33
34 def _build_openapi_schema(self) -> OpenAPI:
35 openapi = self.openapi_config.to_openapi_schema()
36 context = OpenAPIContext(openapi_config=self.openapi_config, plugins=self.app.plugins.openapi)
37 openapi.paths = {
38 route.path_format or "/": create_path_item_for_route(context, route) for route in self.included_routes
39 }
40 openapi.components.schemas = context.schema_registry.generate_components_schemas()
41 return openapi
42
43 def provide_openapi(self) -> OpenAPI:
44 if not self._openapi_schema:
45 self._openapi_schema = self._build_openapi_schema()
46 return self._openapi_schema
47
48 def on_app_init(self, app_config: AppConfig) -> AppConfig:
49 if app_config.openapi_config:
50 self._openapi_config = app_config.openapi_config
51 app_config.route_handlers.append(self.openapi_config.openapi_controller)
52 return app_config
53
54 @property
55 def openapi_config(self) -> OpenAPIConfig:
56 if not self._openapi_config:
57 raise ImproperlyConfiguredException("OpenAPIConfig not initialized")
58 return self._openapi_config
59
60 def receive_route(self, route: BaseRoute) -> None:
61 if not isinstance(route, HTTPRoute):
62 return
63
64 if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):
65 # Force recompute the schema if a new route is added
66 self._openapi_schema = None
67 self.included_routes.append(route)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/litestar/_openapi/plugin.py b/litestar/_openapi/plugin.py
--- a/litestar/_openapi/plugin.py
+++ b/litestar/_openapi/plugin.py
@@ -27,7 +27,7 @@
def __init__(self, app: Litestar) -> None:
self.app = app
- self.included_routes: list[HTTPRoute] = []
+ self.included_routes: dict[str, HTTPRoute] = {}
self._openapi_config: OpenAPIConfig | None = None
self._openapi_schema: OpenAPI | None = None
@@ -35,7 +35,8 @@
openapi = self.openapi_config.to_openapi_schema()
context = OpenAPIContext(openapi_config=self.openapi_config, plugins=self.app.plugins.openapi)
openapi.paths = {
- route.path_format or "/": create_path_item_for_route(context, route) for route in self.included_routes
+ route.path_format or "/": create_path_item_for_route(context, route)
+ for route in self.included_routes.values()
}
openapi.components.schemas = context.schema_registry.generate_components_schemas()
return openapi
@@ -64,4 +65,4 @@
if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):
# Force recompute the schema if a new route is added
self._openapi_schema = None
- self.included_routes.append(route)
+ self.included_routes[route.path] = route
|
{"golden_diff": "diff --git a/litestar/_openapi/plugin.py b/litestar/_openapi/plugin.py\n--- a/litestar/_openapi/plugin.py\n+++ b/litestar/_openapi/plugin.py\n@@ -27,7 +27,7 @@\n \n def __init__(self, app: Litestar) -> None:\n self.app = app\n- self.included_routes: list[HTTPRoute] = []\n+ self.included_routes: dict[str, HTTPRoute] = {}\n self._openapi_config: OpenAPIConfig | None = None\n self._openapi_schema: OpenAPI | None = None\n \n@@ -35,7 +35,8 @@\n openapi = self.openapi_config.to_openapi_schema()\n context = OpenAPIContext(openapi_config=self.openapi_config, plugins=self.app.plugins.openapi)\n openapi.paths = {\n- route.path_format or \"/\": create_path_item_for_route(context, route) for route in self.included_routes\n+ route.path_format or \"/\": create_path_item_for_route(context, route)\n+ for route in self.included_routes.values()\n }\n openapi.components.schemas = context.schema_registry.generate_components_schemas()\n return openapi\n@@ -64,4 +65,4 @@\n if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):\n # Force recompute the schema if a new route is added\n self._openapi_schema = None\n- self.included_routes.append(route)\n+ self.included_routes[route.path] = route\n", "issue": "Bug: OpenAPI schema generation fails due to same operation IDs\n### Description\n\nIf two routes with the same path, but different methods are defined then the OpenAPI generation fails due to both of them having the same value for operation ID. After running `git bisect`, #2805 seems to have introduced this.\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n```python\nfrom litestar import get, post\r\nfrom litestar.app import Litestar\r\nfrom litestar.testing import create_test_client\r\n\r\n\r\n@post(\"/\")\r\nasync def post_handler() -> None:\r\n ...\r\n\r\n\r\n@get(\"/\")\r\nasync def get_handler() -> None:\r\n ...\r\n\r\n\r\nwith create_test_client([post_handler, get_handler]) as client:\r\n response = client.get(\"/schema/openapi.json\")\r\n\r\n assert response.status_code == 200\n```\n\n\n### Steps to reproduce\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Logs\n\n_No response_\n\n### Litestar Version\n\nHEAD\n\n### Platform\n\n- [ ] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2863\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2863/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2863/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.datastructures 
import OpenAPIContext\nfrom litestar._openapi.path_item import create_path_item_for_route\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.plugins import InitPluginProtocol\nfrom litestar.plugins.base import ReceiveRoutePlugin\nfrom litestar.routes import HTTPRoute\n\nif TYPE_CHECKING:\n from litestar.app import Litestar\n from litestar.config.app import AppConfig\n from litestar.openapi.config import OpenAPIConfig\n from litestar.openapi.spec import OpenAPI\n from litestar.routes import BaseRoute\n\n\nclass OpenAPIPlugin(InitPluginProtocol, ReceiveRoutePlugin):\n __slots__ = (\n \"app\",\n \"included_routes\",\n \"_openapi_config\",\n \"_openapi_schema\",\n )\n\n def __init__(self, app: Litestar) -> None:\n self.app = app\n self.included_routes: list[HTTPRoute] = []\n self._openapi_config: OpenAPIConfig | None = None\n self._openapi_schema: OpenAPI | None = None\n\n def _build_openapi_schema(self) -> OpenAPI:\n openapi = self.openapi_config.to_openapi_schema()\n context = OpenAPIContext(openapi_config=self.openapi_config, plugins=self.app.plugins.openapi)\n openapi.paths = {\n route.path_format or \"/\": create_path_item_for_route(context, route) for route in self.included_routes\n }\n openapi.components.schemas = context.schema_registry.generate_components_schemas()\n return openapi\n\n def provide_openapi(self) -> OpenAPI:\n if not self._openapi_schema:\n self._openapi_schema = self._build_openapi_schema()\n return self._openapi_schema\n\n def on_app_init(self, app_config: AppConfig) -> AppConfig:\n if app_config.openapi_config:\n self._openapi_config = app_config.openapi_config\n app_config.route_handlers.append(self.openapi_config.openapi_controller)\n return app_config\n\n @property\n def openapi_config(self) -> OpenAPIConfig:\n if not self._openapi_config:\n raise ImproperlyConfiguredException(\"OpenAPIConfig not initialized\")\n return self._openapi_config\n\n def receive_route(self, route: BaseRoute) -> None:\n if not isinstance(route, HTTPRoute):\n return\n\n if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):\n # Force recompute the schema if a new route is added\n self._openapi_schema = None\n self.included_routes.append(route)\n", "path": "litestar/_openapi/plugin.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.datastructures import OpenAPIContext\nfrom litestar._openapi.path_item import create_path_item_for_route\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.plugins import InitPluginProtocol\nfrom litestar.plugins.base import ReceiveRoutePlugin\nfrom litestar.routes import HTTPRoute\n\nif TYPE_CHECKING:\n from litestar.app import Litestar\n from litestar.config.app import AppConfig\n from litestar.openapi.config import OpenAPIConfig\n from litestar.openapi.spec import OpenAPI\n from litestar.routes import BaseRoute\n\n\nclass OpenAPIPlugin(InitPluginProtocol, ReceiveRoutePlugin):\n __slots__ = (\n \"app\",\n \"included_routes\",\n \"_openapi_config\",\n \"_openapi_schema\",\n )\n\n def __init__(self, app: Litestar) -> None:\n self.app = app\n self.included_routes: dict[str, HTTPRoute] = {}\n self._openapi_config: OpenAPIConfig | None = None\n self._openapi_schema: OpenAPI | None = None\n\n def _build_openapi_schema(self) -> OpenAPI:\n openapi = self.openapi_config.to_openapi_schema()\n context = OpenAPIContext(openapi_config=self.openapi_config, 
plugins=self.app.plugins.openapi)\n openapi.paths = {\n route.path_format or \"/\": create_path_item_for_route(context, route)\n for route in self.included_routes.values()\n }\n openapi.components.schemas = context.schema_registry.generate_components_schemas()\n return openapi\n\n def provide_openapi(self) -> OpenAPI:\n if not self._openapi_schema:\n self._openapi_schema = self._build_openapi_schema()\n return self._openapi_schema\n\n def on_app_init(self, app_config: AppConfig) -> AppConfig:\n if app_config.openapi_config:\n self._openapi_config = app_config.openapi_config\n app_config.route_handlers.append(self.openapi_config.openapi_controller)\n return app_config\n\n @property\n def openapi_config(self) -> OpenAPIConfig:\n if not self._openapi_config:\n raise ImproperlyConfiguredException(\"OpenAPIConfig not initialized\")\n return self._openapi_config\n\n def receive_route(self, route: BaseRoute) -> None:\n if not isinstance(route, HTTPRoute):\n return\n\n if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):\n # Force recompute the schema if a new route is added\n self._openapi_schema = None\n self.included_routes[route.path] = route\n", "path": "litestar/_openapi/plugin.py"}]}
| 1,478 | 339 |
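The fix above boils down to de-duplicating received routes by path. A toy illustration in plain Python (not Litestar internals) of why the dict makes the duplicate disappear:
```python
# The same route path can be received more than once while the app is assembled.
received = ["/", "/", "/items"]

as_list = []
for path in received:
    as_list.append(path)   # old behaviour: the duplicate survives and the path
                           # is later rendered twice, clashing on operation IDs

as_dict = {}
for path in received:
    as_dict[path] = path   # new behaviour: the second report overwrites the
                           # first, so each path is rendered exactly once

assert as_list == ["/", "/", "/items"]
assert list(as_dict) == ["/", "/items"]
```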
gh_patches_debug_31890 | rasdani/github-patches | git_diff | lutris__lutris-1996 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lutris fails to open .tar.gz with "not a gzip file" if file is a tar file
Lutris fails to open an archive where 7z doesn't. If I rename the file to `TTRLinux-v1.2.3.tar`, Lutris can open and extract it. `format: tar` doesn't work here.
```
files:
- client: https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz
game:
exe: Launcher
installer:
- extract:
file: client
format: tar
```
```
2019-04-04 16:53:40,809: Downloading file 1 of 1
2019-04-04 16:53:40,811: Downloading [client]: https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz to /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz
2019-04-04 16:53:40,814: Downloading https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz to /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz
2019-04-04 16:53:40,816: Starting download of:
https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz
2019-04-04 16:53:44,410: Finished downloading https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz
2019-04-04 16:53:44,822: Installer command: {'extract': {'format': 'tar', 'file': 'client'}}
2019-04-04 16:53:44,823: Extracting TTRLinux-v1.2.3.tar.gz
2019-04-04 16:53:44,824: extracting file /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz to /home/cxf/Games/toontown-rewritten
2019-04-04 16:53:44,831: Extracting /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz to /home/cxf/Games/toontown-rewritten
2019-04-04 16:53:44,832: Extraction failed: not a gzip file
Traceback (most recent call last):
File "/usr/lib/python3.6/tarfile.py", line 1643, in gzopen
t = cls.taropen(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.6/tarfile.py", line 1619, in taropen
return cls(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.6/tarfile.py", line 1482, in __init__
self.firstmember = self.next()
File "/usr/lib/python3.6/tarfile.py", line 2297, in next
tarinfo = self.tarinfo.fromtarfile(self)
File "/usr/lib/python3.6/tarfile.py", line 1092, in fromtarfile
buf = tarfile.fileobj.read(BLOCKSIZE)
File "/usr/lib/python3.6/gzip.py", line 276, in read
return self._buffer.read(size)
File "/usr/lib/python3.6/_compression.py", line 68, in readinto
data = self.read(len(byte_view))
File "/usr/lib/python3.6/gzip.py", line 463, in read
if not self._read_gzip_header():
File "/usr/lib/python3.6/gzip.py", line 411, in _read_gzip_header
raise OSError('Not a gzipped file (%r)' % magic)
OSError: Not a gzipped file (b'To')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/cxf/.lutris/lutris/util/extract.py", line 90, in extract_archive
_do_extract(path, temp_path, opener, mode, extractor)
File "/home/cxf/.lutris/lutris/util/extract.py", line 134, in _do_extract
handler = opener(archive, mode)
File "/usr/lib/python3.6/tarfile.py", line 1589, in open
return func(name, filemode, fileobj, **kwargs)
File "/usr/lib/python3.6/tarfile.py", line 1647, in gzopen
raise ReadError("not a gzip file")
tarfile.ReadError: not a gzip file
2019-04-04 16:53:44,835: Error while completing task <bound method CommandsMixin.extract of <lutris.installer.interpreter.ScriptInterpreter object at 0x7f0507748c88>>: not a gzip file
<class 'lutris.util.extract.ExtractFailure'> not a gzip file
File "/home/cxf/.lutris/lutris/util/jobs.py", line 30, in target
result = self.function(*args, **kwargs)
File "/home/cxf/.lutris/lutris/installer/commands.py", line 180, in extract
extract.extract_archive, filename, dest_path, merge_single, extractor
File "/home/cxf/.lutris/lutris/installer/commands.py", line 527, in _killable_process
result = result_obj.get() # Wait process end & reraise exceptions
File "/usr/lib/python3.6/multiprocessing/pool.py", line 670, in get
raise self._value
2019-04-04 16:53:49,289: Install cancelled
```
```
cxf@PC:~/Downloads$ 7z e TTRLinux-v1.2.3.tar.gz
7-Zip [64] 16.02 : Copyright (c) 1999-2016 Igor Pavlov : 2016-05-21
p7zip Version 16.02 (locale=en_US.UTF-8,Utf16=on,HugeFiles=on,64 bits,4 CPUs Intel(R) Core(TM) i5-4460 CPU @ 3.20GHz (306C3),ASM,AES-NI)
Scanning the drive for archives:
1 file, 37233152 bytes (36 MiB)
Extracting archive: TTRLinux-v1.2.3.tar.gz
WARNING:
TTRLinux-v1.2.3.tar.gz
Can not open the file as [gzip] archive
The file is open as [tar] archive
--
Path = TTRLinux-v1.2.3.tar.gz
Open WARNING: Can not open the file as [gzip] archive
Type = tar
Physical Size = 37233152
Headers Size = 2048
Code Page = UTF-8
Everything is Ok
Archives with Warnings: 1
Folders: 1
Files: 1
Size: 37231062
Compressed: 37233152
```
--- END ISSUE ---
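One way to make extraction tolerant of mislabeled archives like the one above (a sketch of the general approach, not necessarily the project's eventual fix): let `tarfile` sniff the compression itself instead of trusting the file extension, via the transparent `r:*` mode.
```python
import tarfile


def extract_tar_any_compression(path, dest):
    # "r:*" asks tarfile to detect the real compression, so a plain tar that
    # is merely named *.tar.gz (like the TTRLinux archive above) still opens
    # instead of failing with "not a gzip file".
    with tarfile.open(path, "r:*") as archive:
        archive.extractall(dest)


extract_tar_any_compression("TTRLinux-v1.2.3.tar.gz", "/tmp/toontown")
```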
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/extract.py`
Content:
```
1 import os
2 import uuid
3 import shutil
4 import tarfile
5 import subprocess
6 import gzip
7 import zlib
8 from lutris.util import system
9 from lutris.util.log import logger
10 from lutris import settings
11
12
13 class ExtractFailure(Exception):
14 """Exception raised when and archive fails to extract"""
15
16
17 def is_7zip_supported(path, extractor):
18 supported_extractors = (
19 "7z",
20 "xz",
21 "bzip2",
22 "gzip",
23 "tar",
24 "zip",
25 "ar",
26 "arj",
27 "cab",
28 "chm",
29 "cpio",
30 "cramfs",
31 "dmg",
32 "ext",
33 "fat",
34 "gpt",
35 "hfs",
36 "ihex",
37 "iso",
38 "lzh",
39 "lzma",
40 "mbr",
41 "msi",
42 "nsis",
43 "ntfs",
44 "qcow2",
45 "rar",
46 "rpm",
47 "squashfs",
48 "udf",
49 "uefi",
50 "vdi",
51 "vhd",
52 "vmdk",
53 "wim",
54 "xar",
55 "z",
56 )
57 if extractor:
58 return extractor.lower() in supported_extractors
59 _base, ext = os.path.splitext(path)
60 if ext:
61 ext = ext.lstrip(".").lower()
62 return ext in supported_extractors
63
64
65 def extract_archive(path, to_directory=".", merge_single=True, extractor=None):
66 path = os.path.abspath(path)
67 mode = None
68 logger.debug("Extracting %s to %s", path, to_directory)
69
70 if path.endswith(".tar.gz") or path.endswith(".tgz") or extractor == "tgz":
71 opener, mode = tarfile.open, "r:gz"
72 elif path.endswith(".tar.xz") or path.endswith(".txz") or extractor == "txz":
73 opener, mode = tarfile.open, "r:xz"
74 elif path.endswith(".tar") or extractor == "tar":
75 opener, mode = tarfile.open, "r:"
76 elif path.endswith(".gz") or extractor == "gzip":
77 decompress_gz(path, to_directory)
78 return
79 elif path.endswith(".tar.bz2") or path.endswith(".tbz") or extractor == "bz2":
80 opener, mode = tarfile.open, "r:bz2"
81 elif is_7zip_supported(path, extractor):
82 opener = "7zip"
83 else:
84 raise RuntimeError(
85 "Could not extract `%s` as no appropriate extractor is found" % path
86 )
87 temp_name = ".extract-" + str(uuid.uuid4())[:8]
88 temp_path = temp_dir = os.path.join(to_directory, temp_name)
89 try:
90 _do_extract(path, temp_path, opener, mode, extractor)
91 except (OSError, zlib.error, tarfile.ReadError) as ex:
92 logger.exception("Extraction failed: %s", ex)
93 raise ExtractFailure(str(ex))
94 if merge_single:
95 extracted = os.listdir(temp_path)
96 if len(extracted) == 1:
97 temp_path = os.path.join(temp_path, extracted[0])
98
99 if os.path.isfile(temp_path):
100 destination_path = os.path.join(to_directory, extracted[0])
101 if os.path.isfile(destination_path):
102 logger.warning("Overwrite existing file %s", destination_path)
103 os.remove(destination_path)
104 shutil.move(temp_path, to_directory)
105 os.removedirs(temp_dir)
106 else:
107 for archive_file in os.listdir(temp_path):
108 source_path = os.path.join(temp_path, archive_file)
109 destination_path = os.path.join(to_directory, archive_file)
110 # logger.debug("Moving extracted files from %s to %s", source_path, destination_path)
111
112 if system.path_exists(destination_path):
113 logger.warning("Overwrite existing path %s", destination_path)
114 if os.path.isfile(destination_path):
115 os.remove(destination_path)
116 shutil.move(source_path, destination_path)
117 elif os.path.isdir(destination_path):
118 try:
119 system.merge_folders(source_path, destination_path)
120 except OSError as ex:
121 logger.error("Failed to merge to destination %s: %s", destination_path, ex)
122 raise ExtractFailure(str(ex))
123 else:
124 shutil.move(source_path, destination_path)
125 system.remove_folder(temp_dir)
126 logger.debug("Finished extracting %s to %s", path, to_directory)
127 return path, to_directory
128
129
130 def _do_extract(archive, dest, opener, mode=None, extractor=None):
131 if opener == "7zip":
132 extract_7zip(archive, dest, archive_type=extractor)
133 else:
134 handler = opener(archive, mode)
135 handler.extractall(dest)
136 handler.close()
137
138
139 def decompress_gz(file_path, dest_path=None):
140 """Decompress a gzip file."""
141 if dest_path:
142 dest_filename = os.path.join(dest_path, os.path.basename(file_path[:-3]))
143 else:
144 dest_filename = file_path[:-3]
145
146 gzipped_file = gzip.open(file_path, "rb")
147 file_content = gzipped_file.read()
148 gzipped_file.close()
149
150 dest_file = open(dest_filename, "wb")
151 dest_file.write(file_content)
152 dest_file.close()
153
154 return dest_path
155
156
157 def extract_7zip(path, dest, archive_type=None):
158 _7zip_path = os.path.join(settings.RUNTIME_DIR, "p7zip/7z")
159 if not system.path_exists(_7zip_path):
160 _7zip_path = system.find_executable("7z")
161 if not system.path_exists(_7zip_path):
162 raise OSError("7zip is not found in the lutris runtime or on the system")
163 command = [_7zip_path, "x", path, "-o{}".format(dest), "-aoa"]
164 if archive_type:
165 command.append("-t{}".format(archive_type))
166 subprocess.call(command)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/util/extract.py b/lutris/util/extract.py
--- a/lutris/util/extract.py
+++ b/lutris/util/extract.py
@@ -67,23 +67,42 @@
mode = None
logger.debug("Extracting %s to %s", path, to_directory)
- if path.endswith(".tar.gz") or path.endswith(".tgz") or extractor == "tgz":
+ if extractor is None:
+ if path.endswith(".tar.gz") or path.endswith(".tgz"):
+ extractor = "tgz"
+ elif path.endswith(".tar.xz") or path.endswith(".txz"):
+ extractor = "txz"
+ elif path.endswith(".tar"):
+ extractor = "tar"
+ elif path.endswith(".tar.bz2") or path.endswith(".tbz"):
+ extractor = "bz2"
+ elif path.endswith(".gz"):
+ extractor = "gzip"
+ elif is_7zip_supported(path, None):
+ extractor = None
+ else:
+ raise RuntimeError(
+ "Could not extract `%s` - no appropriate extractor found" % path
+ )
+
+ if extractor == "tgz":
opener, mode = tarfile.open, "r:gz"
- elif path.endswith(".tar.xz") or path.endswith(".txz") or extractor == "txz":
+ elif extractor == "txz":
opener, mode = tarfile.open, "r:xz"
- elif path.endswith(".tar") or extractor == "tar":
+ elif extractor == "tar":
opener, mode = tarfile.open, "r:"
- elif path.endswith(".gz") or extractor == "gzip":
+ elif extractor == "bz2":
+ opener, mode = tarfile.open, "r:bz2"
+ elif extractor == "gzip":
decompress_gz(path, to_directory)
return
- elif path.endswith(".tar.bz2") or path.endswith(".tbz") or extractor == "bz2":
- opener, mode = tarfile.open, "r:bz2"
- elif is_7zip_supported(path, extractor):
+ elif extractor is None or is_7zip_supported(path, extractor):
opener = "7zip"
else:
raise RuntimeError(
- "Could not extract `%s` as no appropriate extractor is found" % path
+ "Could not extract `%s` - unknown format specified" % path
)
+
temp_name = ".extract-" + str(uuid.uuid4())[:8]
temp_path = temp_dir = os.path.join(to_directory, temp_name)
try:
|
{"golden_diff": "diff --git a/lutris/util/extract.py b/lutris/util/extract.py\n--- a/lutris/util/extract.py\n+++ b/lutris/util/extract.py\n@@ -67,23 +67,42 @@\n mode = None\n logger.debug(\"Extracting %s to %s\", path, to_directory)\n \n- if path.endswith(\".tar.gz\") or path.endswith(\".tgz\") or extractor == \"tgz\":\n+ if extractor is None:\n+ if path.endswith(\".tar.gz\") or path.endswith(\".tgz\"):\n+ extractor = \"tgz\"\n+ elif path.endswith(\".tar.xz\") or path.endswith(\".txz\"):\n+ extractor = \"txz\"\n+ elif path.endswith(\".tar\"):\n+ extractor = \"tar\"\n+ elif path.endswith(\".tar.bz2\") or path.endswith(\".tbz\"):\n+ extractor = \"bz2\"\n+ elif path.endswith(\".gz\"):\n+ extractor = \"gzip\"\n+ elif is_7zip_supported(path, None):\n+ extractor = None\n+ else:\n+ raise RuntimeError(\n+ \"Could not extract `%s` - no appropriate extractor found\" % path\n+ )\n+\n+ if extractor == \"tgz\":\n opener, mode = tarfile.open, \"r:gz\"\n- elif path.endswith(\".tar.xz\") or path.endswith(\".txz\") or extractor == \"txz\":\n+ elif extractor == \"txz\":\n opener, mode = tarfile.open, \"r:xz\"\n- elif path.endswith(\".tar\") or extractor == \"tar\":\n+ elif extractor == \"tar\":\n opener, mode = tarfile.open, \"r:\"\n- elif path.endswith(\".gz\") or extractor == \"gzip\":\n+ elif extractor == \"bz2\":\n+ opener, mode = tarfile.open, \"r:bz2\"\n+ elif extractor == \"gzip\":\n decompress_gz(path, to_directory)\n return\n- elif path.endswith(\".tar.bz2\") or path.endswith(\".tbz\") or extractor == \"bz2\":\n- opener, mode = tarfile.open, \"r:bz2\"\n- elif is_7zip_supported(path, extractor):\n+ elif extractor is None or is_7zip_supported(path, extractor):\n opener = \"7zip\"\n else:\n raise RuntimeError(\n- \"Could not extract `%s` as no appropriate extractor is found\" % path\n+ \"Could not extract `%s` - unknown format specified\" % path\n )\n+\n temp_name = \".extract-\" + str(uuid.uuid4())[:8]\n temp_path = temp_dir = os.path.join(to_directory, temp_name)\n try:\n", "issue": "Lutris fails to open .tar.gz with \"not a gzip file\" if file is a tar file\nLutris fails to open archive where 7z doesn't. If I rename the file to `TTRLinux-v1.2.3.tar` Lutris can open and extract it. 
`format: tar` doesn't work here.\r\n\r\n```\r\nfiles:\r\n- client: https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz\r\ngame:\r\n exe: Launcher\r\ninstaller:\r\n- extract:\r\n file: client\r\n format: tar\r\n```\r\n\r\n```\r\n2019-04-04 16:53:40,809: Downloading file 1 of 1\r\n2019-04-04 16:53:40,811: Downloading [client]: https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz to /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz\r\n2019-04-04 16:53:40,814: Downloading https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz to /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz\r\n2019-04-04 16:53:40,816: Starting download of:\r\n https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz\r\n2019-04-04 16:53:44,410: Finished downloading https://download.toontownrewritten.com/launcher/linux/TTRLinux-v1.2.3.tar.gz\r\n2019-04-04 16:53:44,822: Installer command: {'extract': {'format': 'tar', 'file': 'client'}}\r\n2019-04-04 16:53:44,823: Extracting TTRLinux-v1.2.3.tar.gz\r\n2019-04-04 16:53:44,824: extracting file /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz to /home/cxf/Games/toontown-rewritten\r\n2019-04-04 16:53:44,831: Extracting /home/cxf/.cache/lutris/installer/toontown-rewritten/client/TTRLinux-v1.2.3.tar.gz to /home/cxf/Games/toontown-rewritten\r\n2019-04-04 16:53:44,832: Extraction failed: not a gzip file\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/tarfile.py\", line 1643, in gzopen\r\n t = cls.taropen(name, mode, fileobj, **kwargs)\r\n File \"/usr/lib/python3.6/tarfile.py\", line 1619, in taropen\r\n return cls(name, mode, fileobj, **kwargs)\r\n File \"/usr/lib/python3.6/tarfile.py\", line 1482, in __init__\r\n self.firstmember = self.next()\r\n File \"/usr/lib/python3.6/tarfile.py\", line 2297, in next\r\n tarinfo = self.tarinfo.fromtarfile(self)\r\n File \"/usr/lib/python3.6/tarfile.py\", line 1092, in fromtarfile\r\n buf = tarfile.fileobj.read(BLOCKSIZE)\r\n File \"/usr/lib/python3.6/gzip.py\", line 276, in read\r\n return self._buffer.read(size)\r\n File \"/usr/lib/python3.6/_compression.py\", line 68, in readinto\r\n data = self.read(len(byte_view))\r\n File \"/usr/lib/python3.6/gzip.py\", line 463, in read\r\n if not self._read_gzip_header():\r\n File \"/usr/lib/python3.6/gzip.py\", line 411, in _read_gzip_header\r\n raise OSError('Not a gzipped file (%r)' % magic)\r\nOSError: Not a gzipped file (b'To')\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cxf/.lutris/lutris/util/extract.py\", line 90, in extract_archive\r\n _do_extract(path, temp_path, opener, mode, extractor)\r\n File \"/home/cxf/.lutris/lutris/util/extract.py\", line 134, in _do_extract\r\n handler = opener(archive, mode)\r\n File \"/usr/lib/python3.6/tarfile.py\", line 1589, in open\r\n return func(name, filemode, fileobj, **kwargs)\r\n File \"/usr/lib/python3.6/tarfile.py\", line 1647, in gzopen\r\n raise ReadError(\"not a gzip file\")\r\ntarfile.ReadError: not a gzip file\r\n2019-04-04 16:53:44,835: Error while completing task <bound method CommandsMixin.extract of <lutris.installer.interpreter.ScriptInterpreter object at 0x7f0507748c88>>: not a gzip file\r\n<class 'lutris.util.extract.ExtractFailure'> not a gzip file\r\n File \"/home/cxf/.lutris/lutris/util/jobs.py\", line 30, in target\r\n result = 
self.function(*args, **kwargs)\r\n File \"/home/cxf/.lutris/lutris/installer/commands.py\", line 180, in extract\r\n extract.extract_archive, filename, dest_path, merge_single, extractor\r\n File \"/home/cxf/.lutris/lutris/installer/commands.py\", line 527, in _killable_process\r\n result = result_obj.get() # Wait process end & reraise exceptions\r\n File \"/usr/lib/python3.6/multiprocessing/pool.py\", line 670, in get\r\n raise self._value\r\n2019-04-04 16:53:49,289: Install cancelled\r\n```\r\n\r\n```\r\ncxf@PC:~/Downloads$ 7z e TTRLinux-v1.2.3.tar.gz \r\n\r\n7-Zip [64] 16.02 : Copyright (c) 1999-2016 Igor Pavlov : 2016-05-21\r\np7zip Version 16.02 (locale=en_US.UTF-8,Utf16=on,HugeFiles=on,64 bits,4 CPUs Intel(R) Core(TM) i5-4460 CPU @ 3.20GHz (306C3),ASM,AES-NI)\r\n\r\nScanning the drive for archives:\r\n1 file, 37233152 bytes (36 MiB)\r\n\r\nExtracting archive: TTRLinux-v1.2.3.tar.gz\r\nWARNING:\r\nTTRLinux-v1.2.3.tar.gz\r\nCan not open the file as [gzip] archive\r\nThe file is open as [tar] archive\r\n\r\n--\r\nPath = TTRLinux-v1.2.3.tar.gz\r\nOpen WARNING: Can not open the file as [gzip] archive\r\nType = tar\r\nPhysical Size = 37233152\r\nHeaders Size = 2048\r\nCode Page = UTF-8\r\n\r\nEverything is Ok\r\n\r\nArchives with Warnings: 1\r\nFolders: 1\r\nFiles: 1\r\nSize: 37231062\r\nCompressed: 37233152\r\n```\r\n\n", "before_files": [{"content": "import os\nimport uuid\nimport shutil\nimport tarfile\nimport subprocess\nimport gzip\nimport zlib\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris import settings\n\n\nclass ExtractFailure(Exception):\n \"\"\"Exception raised when and archive fails to extract\"\"\"\n\n\ndef is_7zip_supported(path, extractor):\n supported_extractors = (\n \"7z\",\n \"xz\",\n \"bzip2\",\n \"gzip\",\n \"tar\",\n \"zip\",\n \"ar\",\n \"arj\",\n \"cab\",\n \"chm\",\n \"cpio\",\n \"cramfs\",\n \"dmg\",\n \"ext\",\n \"fat\",\n \"gpt\",\n \"hfs\",\n \"ihex\",\n \"iso\",\n \"lzh\",\n \"lzma\",\n \"mbr\",\n \"msi\",\n \"nsis\",\n \"ntfs\",\n \"qcow2\",\n \"rar\",\n \"rpm\",\n \"squashfs\",\n \"udf\",\n \"uefi\",\n \"vdi\",\n \"vhd\",\n \"vmdk\",\n \"wim\",\n \"xar\",\n \"z\",\n )\n if extractor:\n return extractor.lower() in supported_extractors\n _base, ext = os.path.splitext(path)\n if ext:\n ext = ext.lstrip(\".\").lower()\n return ext in supported_extractors\n\n\ndef extract_archive(path, to_directory=\".\", merge_single=True, extractor=None):\n path = os.path.abspath(path)\n mode = None\n logger.debug(\"Extracting %s to %s\", path, to_directory)\n\n if path.endswith(\".tar.gz\") or path.endswith(\".tgz\") or extractor == \"tgz\":\n opener, mode = tarfile.open, \"r:gz\"\n elif path.endswith(\".tar.xz\") or path.endswith(\".txz\") or extractor == \"txz\":\n opener, mode = tarfile.open, \"r:xz\"\n elif path.endswith(\".tar\") or extractor == \"tar\":\n opener, mode = tarfile.open, \"r:\"\n elif path.endswith(\".gz\") or extractor == \"gzip\":\n decompress_gz(path, to_directory)\n return\n elif path.endswith(\".tar.bz2\") or path.endswith(\".tbz\") or extractor == \"bz2\":\n opener, mode = tarfile.open, \"r:bz2\"\n elif is_7zip_supported(path, extractor):\n opener = \"7zip\"\n else:\n raise RuntimeError(\n \"Could not extract `%s` as no appropriate extractor is found\" % path\n )\n temp_name = \".extract-\" + str(uuid.uuid4())[:8]\n temp_path = temp_dir = os.path.join(to_directory, temp_name)\n try:\n _do_extract(path, temp_path, opener, mode, extractor)\n except (OSError, zlib.error, tarfile.ReadError) as ex:\n 
logger.exception(\"Extraction failed: %s\", ex)\n raise ExtractFailure(str(ex))\n if merge_single:\n extracted = os.listdir(temp_path)\n if len(extracted) == 1:\n temp_path = os.path.join(temp_path, extracted[0])\n\n if os.path.isfile(temp_path):\n destination_path = os.path.join(to_directory, extracted[0])\n if os.path.isfile(destination_path):\n logger.warning(\"Overwrite existing file %s\", destination_path)\n os.remove(destination_path)\n shutil.move(temp_path, to_directory)\n os.removedirs(temp_dir)\n else:\n for archive_file in os.listdir(temp_path):\n source_path = os.path.join(temp_path, archive_file)\n destination_path = os.path.join(to_directory, archive_file)\n # logger.debug(\"Moving extracted files from %s to %s\", source_path, destination_path)\n\n if system.path_exists(destination_path):\n logger.warning(\"Overwrite existing path %s\", destination_path)\n if os.path.isfile(destination_path):\n os.remove(destination_path)\n shutil.move(source_path, destination_path)\n elif os.path.isdir(destination_path):\n try:\n system.merge_folders(source_path, destination_path)\n except OSError as ex:\n logger.error(\"Failed to merge to destination %s: %s\", destination_path, ex)\n raise ExtractFailure(str(ex))\n else:\n shutil.move(source_path, destination_path)\n system.remove_folder(temp_dir)\n logger.debug(\"Finished extracting %s to %s\", path, to_directory)\n return path, to_directory\n\n\ndef _do_extract(archive, dest, opener, mode=None, extractor=None):\n if opener == \"7zip\":\n extract_7zip(archive, dest, archive_type=extractor)\n else:\n handler = opener(archive, mode)\n handler.extractall(dest)\n handler.close()\n\n\ndef decompress_gz(file_path, dest_path=None):\n \"\"\"Decompress a gzip file.\"\"\"\n if dest_path:\n dest_filename = os.path.join(dest_path, os.path.basename(file_path[:-3]))\n else:\n dest_filename = file_path[:-3]\n\n gzipped_file = gzip.open(file_path, \"rb\")\n file_content = gzipped_file.read()\n gzipped_file.close()\n\n dest_file = open(dest_filename, \"wb\")\n dest_file.write(file_content)\n dest_file.close()\n\n return dest_path\n\n\ndef extract_7zip(path, dest, archive_type=None):\n _7zip_path = os.path.join(settings.RUNTIME_DIR, \"p7zip/7z\")\n if not system.path_exists(_7zip_path):\n _7zip_path = system.find_executable(\"7z\")\n if not system.path_exists(_7zip_path):\n raise OSError(\"7zip is not found in the lutris runtime or on the system\")\n command = [_7zip_path, \"x\", path, \"-o{}\".format(dest), \"-aoa\"]\n if archive_type:\n command.append(\"-t{}\".format(archive_type))\n subprocess.call(command)\n", "path": "lutris/util/extract.py"}], "after_files": [{"content": "import os\nimport uuid\nimport shutil\nimport tarfile\nimport subprocess\nimport gzip\nimport zlib\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris import settings\n\n\nclass ExtractFailure(Exception):\n \"\"\"Exception raised when and archive fails to extract\"\"\"\n\n\ndef is_7zip_supported(path, extractor):\n supported_extractors = (\n \"7z\",\n \"xz\",\n \"bzip2\",\n \"gzip\",\n \"tar\",\n \"zip\",\n \"ar\",\n \"arj\",\n \"cab\",\n \"chm\",\n \"cpio\",\n \"cramfs\",\n \"dmg\",\n \"ext\",\n \"fat\",\n \"gpt\",\n \"hfs\",\n \"ihex\",\n \"iso\",\n \"lzh\",\n \"lzma\",\n \"mbr\",\n \"msi\",\n \"nsis\",\n \"ntfs\",\n \"qcow2\",\n \"rar\",\n \"rpm\",\n \"squashfs\",\n \"udf\",\n \"uefi\",\n \"vdi\",\n \"vhd\",\n \"vmdk\",\n \"wim\",\n \"xar\",\n \"z\",\n )\n if extractor:\n return extractor.lower() in supported_extractors\n _base, ext = 
os.path.splitext(path)\n if ext:\n ext = ext.lstrip(\".\").lower()\n return ext in supported_extractors\n\n\ndef extract_archive(path, to_directory=\".\", merge_single=True, extractor=None):\n path = os.path.abspath(path)\n mode = None\n logger.debug(\"Extracting %s to %s\", path, to_directory)\n\n if extractor is None:\n if path.endswith(\".tar.gz\") or path.endswith(\".tgz\"):\n extractor = \"tgz\"\n elif path.endswith(\".tar.xz\") or path.endswith(\".txz\"):\n extractor = \"txz\"\n elif path.endswith(\".tar\"):\n extractor = \"tar\"\n elif path.endswith(\".tar.bz2\") or path.endswith(\".tbz\"):\n extractor = \"bz2\"\n elif path.endswith(\".gz\"):\n extractor = \"gzip\"\n elif is_7zip_supported(path, None):\n extractor = None\n else:\n raise RuntimeError(\n \"Could not extract `%s` - no appropriate extractor found\" % path\n )\n\n if extractor == \"tgz\":\n opener, mode = tarfile.open, \"r:gz\"\n elif extractor == \"txz\":\n opener, mode = tarfile.open, \"r:xz\"\n elif extractor == \"tar\":\n opener, mode = tarfile.open, \"r:\"\n elif extractor == \"bz2\":\n opener, mode = tarfile.open, \"r:bz2\"\n elif extractor == \"gzip\":\n decompress_gz(path, to_directory)\n return\n elif extractor is None or is_7zip_supported(path, extractor):\n opener = \"7zip\"\n else:\n raise RuntimeError(\n \"Could not extract `%s` - unknown format specified\" % path\n )\n\n temp_name = \".extract-\" + str(uuid.uuid4())[:8]\n temp_path = temp_dir = os.path.join(to_directory, temp_name)\n try:\n _do_extract(path, temp_path, opener, mode, extractor)\n except (OSError, zlib.error, tarfile.ReadError) as ex:\n logger.exception(\"Extraction failed: %s\", ex)\n raise ExtractFailure(str(ex))\n if merge_single:\n extracted = os.listdir(temp_path)\n if len(extracted) == 1:\n temp_path = os.path.join(temp_path, extracted[0])\n\n if os.path.isfile(temp_path):\n destination_path = os.path.join(to_directory, extracted[0])\n if os.path.isfile(destination_path):\n logger.warning(\"Overwrite existing file %s\", destination_path)\n os.remove(destination_path)\n shutil.move(temp_path, to_directory)\n os.removedirs(temp_dir)\n else:\n for archive_file in os.listdir(temp_path):\n source_path = os.path.join(temp_path, archive_file)\n destination_path = os.path.join(to_directory, archive_file)\n # logger.debug(\"Moving extracted files from %s to %s\", source_path, destination_path)\n\n if system.path_exists(destination_path):\n logger.warning(\"Overwrite existing path %s\", destination_path)\n if os.path.isfile(destination_path):\n os.remove(destination_path)\n shutil.move(source_path, destination_path)\n elif os.path.isdir(destination_path):\n try:\n system.merge_folders(source_path, destination_path)\n except OSError as ex:\n logger.error(\"Failed to merge to destination %s: %s\", destination_path, ex)\n raise ExtractFailure(str(ex))\n else:\n shutil.move(source_path, destination_path)\n system.remove_folder(temp_dir)\n logger.debug(\"Finished extracting %s to %s\", path, to_directory)\n return path, to_directory\n\n\ndef _do_extract(archive, dest, opener, mode=None, extractor=None):\n if opener == \"7zip\":\n extract_7zip(archive, dest, archive_type=extractor)\n else:\n handler = opener(archive, mode)\n handler.extractall(dest)\n handler.close()\n\n\ndef decompress_gz(file_path, dest_path=None):\n \"\"\"Decompress a gzip file.\"\"\"\n if dest_path:\n dest_filename = os.path.join(dest_path, os.path.basename(file_path[:-3]))\n else:\n dest_filename = file_path[:-3]\n\n gzipped_file = gzip.open(file_path, \"rb\")\n file_content = 
gzipped_file.read()\n gzipped_file.close()\n\n dest_file = open(dest_filename, \"wb\")\n dest_file.write(file_content)\n dest_file.close()\n\n return dest_path\n\n\ndef extract_7zip(path, dest, archive_type=None):\n _7zip_path = os.path.join(settings.RUNTIME_DIR, \"p7zip/7z\")\n if not system.path_exists(_7zip_path):\n _7zip_path = system.find_executable(\"7z\")\n if not system.path_exists(_7zip_path):\n raise OSError(\"7zip is not found in the lutris runtime or on the system\")\n command = [_7zip_path, \"x\", path, \"-o{}\".format(dest), \"-aoa\"]\n if archive_type:\n command.append(\"-t{}\".format(archive_type))\n subprocess.call(command)\n", "path": "lutris/util/extract.py"}]}
| 3,729 | 580 |
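The accepted Lutris patch above boils down to one idea: resolve an explicit `extractor`/`format` hint into a normalized value before looking at the filename extension, so an installer script can force `format: tar` on a mislabeled `.tar.gz` that is really an uncompressed tar. A minimal standalone sketch of that dispatch order follows; the helper names are illustrative assumptions rather than Lutris functions, and the `"r:*"` mode is shown only as a generic way to let `tarfile` sniff the real compression itself.

```python
import tarfile


def resolve_extractor(path, extractor=None):
    # An explicit hint always wins over the filename extension.
    if extractor:
        return extractor.lower()
    if path.endswith((".tar.gz", ".tgz")):
        return "tgz"
    if path.endswith((".tar.xz", ".txz")):
        return "txz"
    if path.endswith((".tar.bz2", ".tbz")):
        return "bz2"
    if path.endswith(".tar"):
        return "tar"
    if path.endswith(".gz"):
        return "gzip"
    return None  # fall through to 7zip handling or an error, as in the patch


def open_tar_any(path):
    # "r:*" asks tarfile to detect the compression from the file contents,
    # which also opens a plain tar that was wrongly named *.tar.gz.
    return tarfile.open(path, "r:*")
```

For the archive from the issue, `resolve_extractor("TTRLinux-v1.2.3.tar.gz", "tar")` returns `"tar"`, so the plain-tar opener is used and the failing gzip header check never runs.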
gh_patches_debug_978
|
rasdani/github-patches
|
git_diff
|
pwr-Solaar__Solaar-2003
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Solaar exits with RC 1 when terminated
**Information**
<!-- Make sure that your issue is not one of the known issues in the Solaar documentation at https://pwr-solaar.github.io/Solaar/ -->
<!-- Do not bother opening an issue for a version older than 1.1.0. Upgrade to the latest version and see if your issue persists. -->
<!-- If you not running the current version of Solaar, strongly consider upgrading to the newest version. -->
- Solaar version (`solaar --version` or `git describe --tags` if cloned from this repository): solaar 1.1.8+dfsg-2
- Distribution: Debian testing
- Kernel version (ex. `uname -srmo`): `Linux 6.1.0-3-amd64 x86_64 GNU/Linux`
- Output of `solaar show`:
<details>
```
```
</details>
- Contents of `~/.config/solaar/config.yaml` (or `~/.config/solaar/config.json` if `~/.config/solaar/config.yaml` not present):
<details>
```
CONTENTS HERE
```
</details>
- Errors or warrnings from Solaar:
<!-- Under normal operation Solaar keeps a log of warning and error messages in ~/.tmp
while it is running as a file starting with 'Solaar'.
If this file is not available or does not have useful information you can
run Solaar as `solaar -dd`, after killing any running Solaar processes to
have Solaar log informational, warning, and error messages to stdout. -->
**Describe the bug**
When terminated, solaar exits with RC 1. This results in a failed unit if systemd is managing XDG autostart. No log warnings or stderr messages produced.
**To Reproduce**
Steps to reproduce the behavior:
1. run `solaar ; echo $?`
2. run `killall solaar`
3. see rc 1 printed
Workaround:
```
cat << EOF > ~/.config/systemd/user/[email protected]/exit-tweak.conf
[Service]
SuccessExitStatus=0 1
EOF
systemctl --user daemon-reload
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/gtk.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- python-mode -*-
3 # -*- coding: UTF-8 -*-
4
5 ## Copyright (C) 2012-2013 Daniel Pavel
6 ##
7 ## This program is free software; you can redistribute it and/or modify
8 ## it under the terms of the GNU General Public License as published by
9 ## the Free Software Foundation; either version 2 of the License, or
10 ## (at your option) any later version.
11 ##
12 ## This program is distributed in the hope that it will be useful,
13 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 ## GNU General Public License for more details.
16 ##
17 ## You should have received a copy of the GNU General Public License along
18 ## with this program; if not, write to the Free Software Foundation, Inc.,
19 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20
21 import importlib
22 import logging
23 import os.path
24 import signal
25 import sys
26 import tempfile
27
28 from logging import INFO as _INFO
29 from logging import WARNING as _WARNING
30
31 import solaar.cli as _cli
32 import solaar.i18n as _i18n
33
34 from solaar import NAME, __version__
35
36 _log = logging.getLogger(__name__)
37
38 #
39 #
40 #
41
42
43 def _require(module, os_package, gi=None, gi_package=None, gi_version=None):
44 try:
45 if gi is not None:
46 gi.require_version(gi_package, gi_version)
47 return importlib.import_module(module)
48 except (ImportError, ValueError):
49 sys.exit('%s: missing required system package %s' % (NAME, os_package))
50
51
52 battery_icons_style = 'regular'
53 temp = tempfile.NamedTemporaryFile(prefix='Solaar_', mode='w', delete=True)
54
55
56 def _parse_arguments():
57 import argparse
58 arg_parser = argparse.ArgumentParser(
59 prog=NAME.lower(), epilog='For more information see https://pwr-solaar.github.io/Solaar'
60 )
61 arg_parser.add_argument(
62 '-d',
63 '--debug',
64 action='count',
65 default=0,
66 help='print logging messages, for debugging purposes (may be repeated for extra verbosity)'
67 )
68 arg_parser.add_argument(
69 '-D',
70 '--hidraw',
71 action='store',
72 dest='hidraw_path',
73 metavar='PATH',
74 help='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2'
75 )
76 arg_parser.add_argument('--restart-on-wake-up', action='store_true', help='restart Solaar on sleep wake-up (experimental)')
77 arg_parser.add_argument(
78 '-w', '--window', choices=('show', 'hide', 'only'), help='start with window showing / hidden / only (no tray icon)'
79 )
80 arg_parser.add_argument(
81 '-b',
82 '--battery-icons',
83 choices=('regular', 'symbolic', 'solaar'),
84 help='prefer regular battery / symbolic battery / solaar icons'
85 )
86 arg_parser.add_argument('--tray-icon-size', type=int, help='explicit size for tray icons')
87 arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
88 arg_parser.add_argument('--help-actions', action='store_true', help='print help for the optional actions')
89 arg_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions, help='optional actions to perform')
90
91 args = arg_parser.parse_args()
92
93 if args.help_actions:
94 _cli.print_help()
95 return
96
97 if args.window is None:
98 args.window = 'show' # default behaviour is to show main window
99
100 global battery_icons_style
101 battery_icons_style = args.battery_icons if args.battery_icons is not None else 'regular'
102 global tray_icon_size
103 tray_icon_size = args.tray_icon_size
104
105 log_format = '%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'
106 log_level = logging.ERROR - 10 * args.debug
107 logging.getLogger('').setLevel(min(log_level, logging.WARNING))
108 file_handler = logging.StreamHandler(temp)
109 file_handler.setLevel(max(min(log_level, logging.WARNING), logging.INFO))
110 file_handler.setFormatter(logging.Formatter(log_format))
111 logging.getLogger('').addHandler(file_handler)
112 if args.debug > 0:
113 stream_handler = logging.StreamHandler()
114 stream_handler.setFormatter(logging.Formatter(log_format))
115 stream_handler.setLevel(log_level)
116 logging.getLogger('').addHandler(stream_handler)
117
118 if not args.action:
119 if _log.isEnabledFor(logging.INFO):
120 logging.info('language %s (%s), translations path %s', _i18n.language, _i18n.encoding, _i18n.path)
121
122 return args
123
124
125 # On first SIGINT, dump threads to stderr; on second, exit
126 def _handlesig(signl, stack):
127 import faulthandler
128 signal.signal(signal.SIGINT, signal.SIG_DFL)
129 signal.signal(signal.SIGTERM, signal.SIG_DFL)
130
131 if signl == int(signal.SIGINT):
132 if _log.isEnabledFor(_INFO):
133 faulthandler.dump_traceback()
134 sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))
135 else:
136 sys.exit('')
137
138
139 def main():
140 _require('pyudev', 'python3-pyudev')
141
142 args = _parse_arguments()
143 if not args:
144 return
145 if args.action:
146 # if any argument, run comandline and exit
147 return _cli.run(args.action, args.hidraw_path)
148
149 gi = _require('gi', 'python3-gi (in Ubuntu) or python3-gobject (in Fedora)')
150 _require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')
151
152 # handle ^C in console
153 signal.signal(signal.SIGINT, signal.SIG_DFL)
154 signal.signal(signal.SIGINT, _handlesig)
155 signal.signal(signal.SIGTERM, _handlesig)
156
157 udev_file = '42-logitech-unify-permissions.rules'
158 if _log.isEnabledFor(_WARNING) \
159 and not os.path.isfile('/etc/udev/rules.d/' + udev_file) \
160 and not os.path.isfile('/usr/lib/udev/rules.d/' + udev_file) \
161 and not os.path.isfile('/usr/local/lib/udev/rules.d/' + udev_file):
162 _log.warning('Solaar udev file not found in expected location')
163 _log.warning('See https://pwr-solaar.github.io/Solaar/installation for more information')
164 try:
165 import solaar.listener as listener
166 import solaar.ui as ui
167
168 listener.setup_scanner(ui.status_changed, ui.error_dialog)
169
170 import solaar.upower as _upower
171 if args.restart_on_wake_up:
172 _upower.watch(listener.start_all, listener.stop_all)
173 else:
174 _upower.watch(lambda: listener.ping_all(True))
175
176 import solaar.configuration as _configuration
177 _configuration.defer_saves = True # allow configuration saves to be deferred
178
179 # main UI event loop
180 ui.run_loop(listener.start_all, listener.stop_all, args.window != 'only', args.window != 'hide')
181 except Exception:
182 from traceback import format_exc
183 sys.exit('%s: error: %s' % (NAME.lower(), format_exc()))
184
185 temp.close()
186
187
188 if __name__ == '__main__':
189 main()
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py
--- a/lib/solaar/gtk.py
+++ b/lib/solaar/gtk.py
@@ -133,7 +133,7 @@
faulthandler.dump_traceback()
sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))
else:
- sys.exit('')
+ sys.exit(0)
def main():
|
{"golden_diff": "diff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py\n--- a/lib/solaar/gtk.py\n+++ b/lib/solaar/gtk.py\n@@ -133,7 +133,7 @@\n faulthandler.dump_traceback()\n sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))\n else:\n- sys.exit('')\n+ sys.exit(0)\n \n \n def main():\n", "issue": "Solaar exits with RC 1 when terminated\n**Information**\r\n<!-- Make sure that your issue is not one of the known issues in the Solaar documentation at https://pwr-solaar.github.io/Solaar/ -->\r\n<!-- Do not bother opening an issue for a version older than 1.1.0. Upgrade to the latest version and see if your issue persists. -->\r\n<!-- If you not running the current version of Solaar, strongly consider upgrading to the newest version. -->\r\n- Solaar version (`solaar --version` or `git describe --tags` if cloned from this repository): solaar 1.1.8+dfsg-2\r\n- Distribution: Debian testing\r\n- Kernel version (ex. `uname -srmo`): `Linux 6.1.0-3-amd64 x86_64 GNU/Linux`\r\n- Output of `solaar show`: \r\n\r\n<details>\r\n\r\n```\r\n\r\n```\r\n</details>\r\n\r\n- Contents of `~/.config/solaar/config.yaml` (or `~/.config/solaar/config.json` if `~/.config/solaar/config.yaml` not present):\r\n\r\n<details>\r\n\r\n```\r\nCONTENTS HERE\r\n```\r\n</details>\r\n\r\n\r\n- Errors or warrnings from Solaar:\r\n<!-- Under normal operation Solaar keeps a log of warning and error messages in ~/.tmp\r\nwhile it is running as a file starting with 'Solaar'.\r\nIf this file is not available or does not have useful information you can\r\nrun Solaar as `solaar -dd`, after killing any running Solaar processes to\r\nhave Solaar log informational, warning, and error messages to stdout. -->\r\n\r\n\r\n**Describe the bug**\r\nWhen terminated, solaar exits with RC 1. This results in a failed unit if systemd is managing XDG autostart. No log warnings or stderr messages produced.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior: \r\n1. run `solaar ; echo $?`\r\n2. run `killall solaar`\r\n3. see rc 1 printed\r\n\r\nWorkaround:\r\n```\r\ncat << EOF > ~/.config/systemd/user/[email protected]/exit-tweak.conf \r\n[Service]\r\nSuccessExitStatus=0 1\r\nEOF\r\nsystemctl --user daemon-reload\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport importlib\nimport logging\nimport os.path\nimport signal\nimport sys\nimport tempfile\n\nfrom logging import INFO as _INFO\nfrom logging import WARNING as _WARNING\n\nimport solaar.cli as _cli\nimport solaar.i18n as _i18n\n\nfrom solaar import NAME, __version__\n\n_log = logging.getLogger(__name__)\n\n#\n#\n#\n\n\ndef _require(module, os_package, gi=None, gi_package=None, gi_version=None):\n try:\n if gi is not None:\n gi.require_version(gi_package, gi_version)\n return importlib.import_module(module)\n except (ImportError, ValueError):\n sys.exit('%s: missing required system package %s' % (NAME, os_package))\n\n\nbattery_icons_style = 'regular'\ntemp = tempfile.NamedTemporaryFile(prefix='Solaar_', mode='w', delete=True)\n\n\ndef _parse_arguments():\n import argparse\n arg_parser = argparse.ArgumentParser(\n prog=NAME.lower(), epilog='For more information see https://pwr-solaar.github.io/Solaar'\n )\n arg_parser.add_argument(\n '-d',\n '--debug',\n action='count',\n default=0,\n help='print logging messages, for debugging purposes (may be repeated for extra verbosity)'\n )\n arg_parser.add_argument(\n '-D',\n '--hidraw',\n action='store',\n dest='hidraw_path',\n metavar='PATH',\n help='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2'\n )\n arg_parser.add_argument('--restart-on-wake-up', action='store_true', help='restart Solaar on sleep wake-up (experimental)')\n arg_parser.add_argument(\n '-w', '--window', choices=('show', 'hide', 'only'), help='start with window showing / hidden / only (no tray icon)'\n )\n arg_parser.add_argument(\n '-b',\n '--battery-icons',\n choices=('regular', 'symbolic', 'solaar'),\n help='prefer regular battery / symbolic battery / solaar icons'\n )\n arg_parser.add_argument('--tray-icon-size', type=int, help='explicit size for tray icons')\n arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n arg_parser.add_argument('--help-actions', action='store_true', help='print help for the optional actions')\n arg_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions, help='optional actions to perform')\n\n args = arg_parser.parse_args()\n\n if args.help_actions:\n _cli.print_help()\n return\n\n if args.window is None:\n args.window = 'show' # default behaviour is to show main window\n\n global battery_icons_style\n battery_icons_style = args.battery_icons if args.battery_icons is not None else 'regular'\n global tray_icon_size\n tray_icon_size = args.tray_icon_size\n\n log_format = '%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'\n log_level = logging.ERROR - 10 * args.debug\n logging.getLogger('').setLevel(min(log_level, logging.WARNING))\n file_handler = logging.StreamHandler(temp)\n file_handler.setLevel(max(min(log_level, logging.WARNING), logging.INFO))\n file_handler.setFormatter(logging.Formatter(log_format))\n logging.getLogger('').addHandler(file_handler)\n if args.debug > 0:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(log_format))\n stream_handler.setLevel(log_level)\n logging.getLogger('').addHandler(stream_handler)\n\n if not args.action:\n if _log.isEnabledFor(logging.INFO):\n 
logging.info('language %s (%s), translations path %s', _i18n.language, _i18n.encoding, _i18n.path)\n\n return args\n\n\n# On first SIGINT, dump threads to stderr; on second, exit\ndef _handlesig(signl, stack):\n import faulthandler\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n\n if signl == int(signal.SIGINT):\n if _log.isEnabledFor(_INFO):\n faulthandler.dump_traceback()\n sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))\n else:\n sys.exit('')\n\n\ndef main():\n _require('pyudev', 'python3-pyudev')\n\n args = _parse_arguments()\n if not args:\n return\n if args.action:\n # if any argument, run comandline and exit\n return _cli.run(args.action, args.hidraw_path)\n\n gi = _require('gi', 'python3-gi (in Ubuntu) or python3-gobject (in Fedora)')\n _require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')\n\n # handle ^C in console\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGINT, _handlesig)\n signal.signal(signal.SIGTERM, _handlesig)\n\n udev_file = '42-logitech-unify-permissions.rules'\n if _log.isEnabledFor(_WARNING) \\\n and not os.path.isfile('/etc/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/lib/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/local/lib/udev/rules.d/' + udev_file):\n _log.warning('Solaar udev file not found in expected location')\n _log.warning('See https://pwr-solaar.github.io/Solaar/installation for more information')\n try:\n import solaar.listener as listener\n import solaar.ui as ui\n\n listener.setup_scanner(ui.status_changed, ui.error_dialog)\n\n import solaar.upower as _upower\n if args.restart_on_wake_up:\n _upower.watch(listener.start_all, listener.stop_all)\n else:\n _upower.watch(lambda: listener.ping_all(True))\n\n import solaar.configuration as _configuration\n _configuration.defer_saves = True # allow configuration saves to be deferred\n\n # main UI event loop\n ui.run_loop(listener.start_all, listener.stop_all, args.window != 'only', args.window != 'hide')\n except Exception:\n from traceback import format_exc\n sys.exit('%s: error: %s' % (NAME.lower(), format_exc()))\n\n temp.close()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/solaar/gtk.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport importlib\nimport logging\nimport os.path\nimport signal\nimport sys\nimport tempfile\n\nfrom logging import INFO as _INFO\nfrom logging import WARNING as _WARNING\n\nimport solaar.cli as _cli\nimport solaar.i18n as _i18n\n\nfrom solaar import NAME, __version__\n\n_log = logging.getLogger(__name__)\n\n#\n#\n#\n\n\ndef _require(module, os_package, gi=None, gi_package=None, gi_version=None):\n try:\n if gi is not None:\n gi.require_version(gi_package, gi_version)\n return importlib.import_module(module)\n except (ImportError, ValueError):\n sys.exit('%s: missing required system package %s' % (NAME, os_package))\n\n\nbattery_icons_style = 'regular'\ntemp = tempfile.NamedTemporaryFile(prefix='Solaar_', mode='w', delete=True)\n\n\ndef _parse_arguments():\n import argparse\n arg_parser = argparse.ArgumentParser(\n prog=NAME.lower(), epilog='For more information see https://pwr-solaar.github.io/Solaar'\n )\n arg_parser.add_argument(\n '-d',\n '--debug',\n action='count',\n default=0,\n help='print logging messages, for debugging purposes (may be repeated for extra verbosity)'\n )\n arg_parser.add_argument(\n '-D',\n '--hidraw',\n action='store',\n dest='hidraw_path',\n metavar='PATH',\n help='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2'\n )\n arg_parser.add_argument('--restart-on-wake-up', action='store_true', help='restart Solaar on sleep wake-up (experimental)')\n arg_parser.add_argument(\n '-w', '--window', choices=('show', 'hide', 'only'), help='start with window showing / hidden / only (no tray icon)'\n )\n arg_parser.add_argument(\n '-b',\n '--battery-icons',\n choices=('regular', 'symbolic', 'solaar'),\n help='prefer regular battery / symbolic battery / solaar icons'\n )\n arg_parser.add_argument('--tray-icon-size', type=int, help='explicit size for tray icons')\n arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n arg_parser.add_argument('--help-actions', action='store_true', help='print help for the optional actions')\n arg_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions, help='optional actions to perform')\n\n args = arg_parser.parse_args()\n\n if args.help_actions:\n _cli.print_help()\n return\n\n if args.window is None:\n args.window = 'show' # default behaviour is to show main window\n\n global battery_icons_style\n battery_icons_style = args.battery_icons if args.battery_icons is not None else 'regular'\n global tray_icon_size\n tray_icon_size = args.tray_icon_size\n\n log_format = '%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'\n log_level = logging.ERROR - 10 * args.debug\n logging.getLogger('').setLevel(min(log_level, logging.WARNING))\n file_handler = logging.StreamHandler(temp)\n file_handler.setLevel(max(min(log_level, logging.WARNING), logging.INFO))\n file_handler.setFormatter(logging.Formatter(log_format))\n logging.getLogger('').addHandler(file_handler)\n if args.debug > 0:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(log_format))\n stream_handler.setLevel(log_level)\n logging.getLogger('').addHandler(stream_handler)\n\n if not args.action:\n if _log.isEnabledFor(logging.INFO):\n 
logging.info('language %s (%s), translations path %s', _i18n.language, _i18n.encoding, _i18n.path)\n\n return args\n\n\n# On first SIGINT, dump threads to stderr; on second, exit\ndef _handlesig(signl, stack):\n import faulthandler\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n\n if signl == int(signal.SIGINT):\n if _log.isEnabledFor(_INFO):\n faulthandler.dump_traceback()\n sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))\n else:\n sys.exit(0)\n\n\ndef main():\n _require('pyudev', 'python3-pyudev')\n\n args = _parse_arguments()\n if not args:\n return\n if args.action:\n # if any argument, run comandline and exit\n return _cli.run(args.action, args.hidraw_path)\n\n gi = _require('gi', 'python3-gi (in Ubuntu) or python3-gobject (in Fedora)')\n _require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')\n\n # handle ^C in console\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGINT, _handlesig)\n signal.signal(signal.SIGTERM, _handlesig)\n\n udev_file = '42-logitech-unify-permissions.rules'\n if _log.isEnabledFor(_WARNING) \\\n and not os.path.isfile('/etc/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/lib/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/local/lib/udev/rules.d/' + udev_file):\n _log.warning('Solaar udev file not found in expected location')\n _log.warning('See https://pwr-solaar.github.io/Solaar/installation for more information')\n try:\n import solaar.listener as listener\n import solaar.ui as ui\n\n listener.setup_scanner(ui.status_changed, ui.error_dialog)\n\n import solaar.upower as _upower\n if args.restart_on_wake_up:\n _upower.watch(listener.start_all, listener.stop_all)\n else:\n _upower.watch(lambda: listener.ping_all(True))\n\n import solaar.configuration as _configuration\n _configuration.defer_saves = True # allow configuration saves to be deferred\n\n # main UI event loop\n ui.run_loop(listener.start_all, listener.stop_all, args.window != 'only', args.window != 'hide')\n except Exception:\n from traceback import format_exc\n sys.exit('%s: error: %s' % (NAME.lower(), format_exc()))\n\n temp.close()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/solaar/gtk.py"}]}
| 2,867 | 99 |
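The one-line Solaar fix above rests on how CPython maps the argument of `sys.exit()` to a process exit status: an integer is used as-is, `None` (or no argument) means success (0), and any other object, including the empty string, is written to stderr and the status becomes 1. That is why a clean SIGTERM previously left the autostart unit "failed" under systemd. A small self-contained check of that mapping (plain Python behaviour, nothing Solaar-specific):

```python
import subprocess
import sys

# sys.exit('') -> status 1 (old behaviour); sys.exit(0) / sys.exit() -> status 0.
for snippet in ("import sys; sys.exit('')",
                "import sys; sys.exit(0)",
                "import sys; sys.exit()"):
    returncode = subprocess.run([sys.executable, "-c", snippet]).returncode
    print("%-28s -> exit status %d" % (snippet, returncode))
```

Running it prints status 1 for the first snippet and 0 for the other two, which matches both the `SuccessExitStatus=0 1` workaround quoted in the issue and the `sys.exit(0)` change in the patch.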
gh_patches_debug_37875
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-27937
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ADN] Can't download premium video after extractor fix
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- Look through the README (http://yt-dl.org/readme) and FAQ (http://yt-dl.org/faq) for similar questions
- Search the bugtracker for similar questions: http://yt-dl.org/search-issues
- Finally, put x into all relevant boxes (like this [x])
-->
- [x] I'm asking a question
- [x] I've looked through the README and FAQ for similar questions
- [x] I've searched the bugtracker for similar questions including closed ones
## Question
<!--
Ask your question in an arbitrary form. Please make sure it's worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient.
-->
First of all I would like to thank the person who fixed the ADN extractor.
Everything works perfectly for downloading the free videos in 480p, but I can't download videos that require a premium account, or download them in better quality.
Before, I used to use the command:
`youtube-dl.exe --cookies ./cookies.txt https://animedigitalnetwork.fr/video/my-hero-academia-saison-4/10271-episode-2-overhaul`
And it worked perfectly, but since the extractor fix it doesn't work anymore. I tried to re-download the cookies as indicated in the doc, but it doesn't change anything. I also tried to use `--username` and `--password`, but that doesn't do anything either, and in both cases it shows me:
`ERROR: This video is only available for paying users`
I think the problem comes from the cookies, which don't have the same shape as before: there were 4 lines in my file, but now there is only one.
Could someone help me?
Thank you.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/adn.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import base64
5 import binascii
6 import json
7 import os
8 import random
9
10 from .common import InfoExtractor
11 from ..aes import aes_cbc_decrypt
12 from ..compat import (
13 compat_HTTPError,
14 compat_b64decode,
15 compat_ord,
16 )
17 from ..utils import (
18 bytes_to_intlist,
19 bytes_to_long,
20 ExtractorError,
21 float_or_none,
22 int_or_none,
23 intlist_to_bytes,
24 long_to_bytes,
25 pkcs1pad,
26 strip_or_none,
27 try_get,
28 unified_strdate,
29 )
30
31
32 class ADNIE(InfoExtractor):
33 IE_DESC = 'Anime Digital Network'
34 _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
35 _TEST = {
36 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
37 'md5': '0319c99885ff5547565cacb4f3f9348d',
38 'info_dict': {
39 'id': '7778',
40 'ext': 'mp4',
41 'title': 'Blue Exorcist - Kyôto Saga - Episode 1',
42 'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',
43 'series': 'Blue Exorcist - Kyôto Saga',
44 'duration': 1467,
45 'release_date': '20170106',
46 'comment_count': int,
47 'average_rating': float,
48 'season_number': 2,
49 'episode': 'Début des hostilités',
50 'episode_number': 1,
51 }
52 }
53
54 _BASE_URL = 'http://animedigitalnetwork.fr'
55 _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'
56 _PLAYER_BASE_URL = _API_BASE_URL + 'player/'
57 _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
58 _POS_ALIGN_MAP = {
59 'start': 1,
60 'end': 3,
61 }
62 _LINE_ALIGN_MAP = {
63 'middle': 8,
64 'end': 4,
65 }
66
67 @staticmethod
68 def _ass_subtitles_timecode(seconds):
69 return '%01d:%02d:%02d.%02d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 100)
70
71 def _get_subtitles(self, sub_url, video_id):
72 if not sub_url:
73 return None
74
75 enc_subtitles = self._download_webpage(
76 sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}'
77 subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')
78 if subtitle_location:
79 enc_subtitles = self._download_webpage(
80 subtitle_location, video_id, 'Downloading subtitles data',
81 fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})
82 if not enc_subtitles:
83 return None
84
85 # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
86 dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
87 bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
88 bytes_to_intlist(binascii.unhexlify(self._K + 'ab9f52f5baae7c72')),
89 bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
90 ))
91 subtitles_json = self._parse_json(
92 dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),
93 None, fatal=False)
94 if not subtitles_json:
95 return None
96
97 subtitles = {}
98 for sub_lang, sub in subtitles_json.items():
99 ssa = '''[Script Info]
100 ScriptType:V4.00
101 [V4 Styles]
102 Format: Name,Fontname,Fontsize,PrimaryColour,SecondaryColour,TertiaryColour,BackColour,Bold,Italic,BorderStyle,Outline,Shadow,Alignment,MarginL,MarginR,MarginV,AlphaLevel,Encoding
103 Style: Default,Arial,18,16777215,16777215,16777215,0,-1,0,1,1,0,2,20,20,20,0,0
104 [Events]
105 Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
106 for current in sub:
107 start, end, text, line_align, position_align = (
108 float_or_none(current.get('startTime')),
109 float_or_none(current.get('endTime')),
110 current.get('text'), current.get('lineAlign'),
111 current.get('positionAlign'))
112 if start is None or end is None or text is None:
113 continue
114 alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
115 ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
116 self._ass_subtitles_timecode(start),
117 self._ass_subtitles_timecode(end),
118 '{\\a%d}' % alignment if alignment != 2 else '',
119 text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}'))
120
121 if sub_lang == 'vostf':
122 sub_lang = 'fr'
123 subtitles.setdefault(sub_lang, []).extend([{
124 'ext': 'json',
125 'data': json.dumps(sub),
126 }, {
127 'ext': 'ssa',
128 'data': ssa,
129 }])
130 return subtitles
131
132 def _real_extract(self, url):
133 video_id = self._match_id(url)
134 video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
135 player = self._download_json(
136 video_base_url + 'configuration', video_id,
137 'Downloading player config JSON metadata')['player']
138 options = player['options']
139
140 user = options['user']
141 if not user.get('hasAccess'):
142 raise ExtractorError(
143 'This video is only available for paying users', expected=True)
144 # self.raise_login_required() # FIXME: Login is not implemented
145
146 token = self._download_json(
147 user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),
148 video_id, 'Downloading access token', headers={
149 'x-player-refresh-token': user['refreshToken']
150 }, data=b'')['token']
151
152 links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
153 self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
154 message = bytes_to_intlist(json.dumps({
155 'k': self._K,
156 't': token,
157 }))
158
159 # Sometimes authentication fails for no good reason, retry with
160 # a different random padding
161 links_data = None
162 for _ in range(3):
163 padded_message = intlist_to_bytes(pkcs1pad(message, 128))
164 n, e = self._RSA_KEY
165 encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))
166 authorization = base64.b64encode(encrypted_message).decode()
167
168 try:
169 links_data = self._download_json(
170 links_url, video_id, 'Downloading links JSON metadata', headers={
171 'X-Player-Token': authorization
172 }, query={
173 'freeWithAds': 'true',
174 'adaptive': 'false',
175 'withMetadata': 'true',
176 'source': 'Web'
177 })
178 break
179 except ExtractorError as e:
180 if not isinstance(e.cause, compat_HTTPError):
181 raise e
182
183 if e.cause.code == 401:
184 # This usually goes away with a different random pkcs1pad, so retry
185 continue
186
187 error = self._parse_json(e.cause.read(), video_id)
188 message = error.get('message')
189 if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
190 self.raise_geo_restricted(msg=message)
191 else:
192 raise ExtractorError(message)
193 else:
194 raise ExtractorError('Giving up retrying')
195
196 links = links_data.get('links') or {}
197 metas = links_data.get('metadata') or {}
198 sub_url = (links.get('subtitles') or {}).get('all')
199 video_info = links_data.get('video') or {}
200 title = metas['title']
201
202 formats = []
203 for format_id, qualities in (links.get('streaming') or {}).items():
204 if not isinstance(qualities, dict):
205 continue
206 for quality, load_balancer_url in qualities.items():
207 load_balancer_data = self._download_json(
208 load_balancer_url, video_id,
209 'Downloading %s %s JSON metadata' % (format_id, quality),
210 fatal=False) or {}
211 m3u8_url = load_balancer_data.get('location')
212 if not m3u8_url:
213 continue
214 m3u8_formats = self._extract_m3u8_formats(
215 m3u8_url, video_id, 'mp4', 'm3u8_native',
216 m3u8_id=format_id, fatal=False)
217 if format_id == 'vf':
218 for f in m3u8_formats:
219 f['language'] = 'fr'
220 formats.extend(m3u8_formats)
221 self._sort_formats(formats)
222
223 video = (self._download_json(
224 self._API_BASE_URL + 'video/%s' % video_id, video_id,
225 'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
226 show = video.get('show') or {}
227
228 return {
229 'id': video_id,
230 'title': title,
231 'description': strip_or_none(metas.get('summary') or video.get('summary')),
232 'thumbnail': video_info.get('image') or player.get('image'),
233 'formats': formats,
234 'subtitles': self.extract_subtitles(sub_url, video_id),
235 'episode': metas.get('subtitle') or video.get('name'),
236 'episode_number': int_or_none(video.get('shortNumber')),
237 'series': show.get('title'),
238 'season_number': int_or_none(video.get('season')),
239 'duration': int_or_none(video_info.get('duration') or video.get('duration')),
240 'release_date': unified_strdate(video.get('releaseDate')),
241 'average_rating': float_or_none(video.get('rating') or metas.get('rating')),
242 'comment_count': int_or_none(video.get('commentsCount')),
243 }
244
```
--- END FILES ---
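A side note on the densest part of the extractor above: the link request is authorized by packing a random 16-character hex key together with the refresh token into a JSON message, PKCS#1-padding it to 128 bytes, RSA-encrypting it with the hard-coded key, and sending the base64 result as `X-Player-Token` (retrying with fresh padding on a 401). Below is a minimal sketch of just that step, reusing the same helpers the file already imports; the retry loop and download plumbing are omitted and the function name is made up for illustration.
```python
import base64
import json
import random

from youtube_dl.utils import (bytes_to_intlist, bytes_to_long,
                              intlist_to_bytes, long_to_bytes, pkcs1pad)


def build_player_authorization(token, rsa_n, rsa_e):
    """Mirror of the X-Player-Token construction in _real_extract above.

    token          -- value returned by the player refresh/token endpoint
    rsa_n, rsa_e   -- the modulus/exponent pair stored in ADNIE._RSA_KEY
    """
    aes_key = ''.join(random.choice('0123456789abcdef') for _ in range(16))
    message = bytes_to_intlist(json.dumps({'k': aes_key, 't': token}))
    padded = intlist_to_bytes(pkcs1pad(message, 128))
    encrypted = long_to_bytes(pow(bytes_to_long(padded), rsa_e, rsa_n))
    # aes_key is what the extractor keeps as self._K and later combines with
    # a fixed suffix to decrypt the subtitles.
    return aes_key, base64.b64encode(encrypted).decode()
```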
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/adn.py b/youtube_dl/extractor/adn.py
--- a/youtube_dl/extractor/adn.py
+++ b/youtube_dl/extractor/adn.py
@@ -26,6 +26,7 @@
strip_or_none,
try_get,
unified_strdate,
+ urlencode_postdata,
)
@@ -51,9 +52,11 @@
}
}
+ _NETRC_MACHINE = 'animedigitalnetwork'
_BASE_URL = 'http://animedigitalnetwork.fr'
_API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'
_PLAYER_BASE_URL = _API_BASE_URL + 'player/'
+ _HEADERS = {}
_RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
_POS_ALIGN_MAP = {
'start': 1,
@@ -129,19 +132,32 @@
}])
return subtitles
+ def _real_initialize(self):
+ username, password = self._get_login_info()
+ if username:
+ access_token = (self._download_json(
+ self._API_BASE_URL + 'authentication/login', None,
+ 'Logging in', errnote='Unable to log in', fatal=False,
+ data=urlencode_postdata({
+ 'password': password,
+ 'rememberMe': False,
+ 'source': 'Web',
+ 'username': username,
+ })) or {}).get('accessToken')
+ if access_token:
+ self._HEADERS = {'authorization': 'Bearer ' + access_token}
+
def _real_extract(self, url):
video_id = self._match_id(url)
video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
player = self._download_json(
video_base_url + 'configuration', video_id,
- 'Downloading player config JSON metadata')['player']
+ 'Downloading player config JSON metadata', headers=self._HEADERS)['player']
options = player['options']
user = options['user']
if not user.get('hasAccess'):
- raise ExtractorError(
- 'This video is only available for paying users', expected=True)
- # self.raise_login_required() # FIXME: Login is not implemented
+ self.raise_login_required()
token = self._download_json(
user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),
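In short, the patch replaces the hard "only available for paying users" error with youtube-dl's standard login prompt and, when credentials are supplied (via the usual --username/--password options or a .netrc entry for the new 'animedigitalnetwork' machine), exchanges them for a bearer token that is attached to the player configuration request. A rough requests-only sketch of that handshake follows, for illustration only; the endpoint, field names and header come from the diff, while the timeout value and the direct use of requests are assumptions (the extractor itself goes through _download_json and self._HEADERS).
```python
import requests

API_BASE = 'https://gw.api.animedigitalnetwork.fr/'


def get_auth_headers(username, password):
    # POST the credentials much as _real_initialize does and keep the
    # returned access token as a Bearer authorization header.
    access_token = requests.post(
        API_BASE + 'authentication/login',
        data={'username': username, 'password': password,
              'rememberMe': False, 'source': 'Web'},
        timeout=30,
    ).json().get('accessToken')
    return {'authorization': 'Bearer ' + access_token} if access_token else {}


# The player configuration call then simply carries those headers, e.g.:
# requests.get(API_BASE + 'player/video/%s/configuration' % video_id,
#              headers=get_auth_headers(user, pw), timeout=30)
```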
|
{"golden_diff": "diff --git a/youtube_dl/extractor/adn.py b/youtube_dl/extractor/adn.py\n--- a/youtube_dl/extractor/adn.py\n+++ b/youtube_dl/extractor/adn.py\n@@ -26,6 +26,7 @@\n strip_or_none,\n try_get,\n unified_strdate,\n+ urlencode_postdata,\n )\n \n \n@@ -51,9 +52,11 @@\n }\n }\n \n+ _NETRC_MACHINE = 'animedigitalnetwork'\n _BASE_URL = 'http://animedigitalnetwork.fr'\n _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'\n _PLAYER_BASE_URL = _API_BASE_URL + 'player/'\n+ _HEADERS = {}\n _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)\n _POS_ALIGN_MAP = {\n 'start': 1,\n@@ -129,19 +132,32 @@\n }])\n return subtitles\n \n+ def _real_initialize(self):\n+ username, password = self._get_login_info()\n+ if username:\n+ access_token = (self._download_json(\n+ self._API_BASE_URL + 'authentication/login', None,\n+ 'Logging in', errnote='Unable to log in', fatal=False,\n+ data=urlencode_postdata({\n+ 'password': password,\n+ 'rememberMe': False,\n+ 'source': 'Web',\n+ 'username': username,\n+ })) or {}).get('accessToken')\n+ if access_token:\n+ self._HEADERS = {'authorization': 'Bearer ' + access_token}\n+\n def _real_extract(self, url):\n video_id = self._match_id(url)\n video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id\n player = self._download_json(\n video_base_url + 'configuration', video_id,\n- 'Downloading player config JSON metadata')['player']\n+ 'Downloading player config JSON metadata', headers=self._HEADERS)['player']\n options = player['options']\n \n user = options['user']\n if not user.get('hasAccess'):\n- raise ExtractorError(\n- 'This video is only available for paying users', expected=True)\n- # self.raise_login_required() # FIXME: Login is not implemented\n+ self.raise_login_required()\n \n token = self._download_json(\n user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),\n", "issue": "[ADN] Can't download premium video after extractor fix\n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:\r\n- Look through the README (http://yt-dl.org/readme) and FAQ (http://yt-dl.org/faq) for similar questions\r\n- Search the bugtracker for similar questions: http://yt-dl.org/search-issues\r\n- Finally, put x into all relevant boxes (like this [x])\r\n-->\r\n\r\n- [x] I'm asking a question\r\n- [x] I've looked through the README and FAQ for similar questions\r\n- [x] I've searched the bugtracker for similar questions including closed ones\r\n\r\n\r\n## Question\r\n\r\n<!--\r\nAsk your question in an arbitrary form. Please make sure it's worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient.\r\n-->\r\n\r\nFirst of all I would like to thank the person who fixed the ADN extractor.\r\n\r\nEverything works perfectly to download the free videos in 480p. 
But to download videos requiring a premium account or to download better quality I can't do it.\r\n\r\nBefore I used to use the command :\r\n\r\n`youtube-dl.exe --cookies ./cookies.txt https://animedigitalnetwork.fr/video/my-hero-academia-saison-4/10271-episode-2-overhaul`\r\n\r\nAnd it worked perfectly but after the extractor fix it doesn't work anymore I tried to re-download the cookies as indicated in the doc but it doesn't change anything I also tried to use `--username` and `--password` but it doesn't do anything either and it shows me in the both case:\r\n\r\n`ERROR: This video is only available for paying users`\r\n\r\nI think the problem comes from the cookies that don't have the same shape as before, there were 4 lines in my file but now there is only one.\r\n\r\nCould someone help me?\r\n\r\nThank you.\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport base64\nimport binascii\nimport json\nimport os\nimport random\n\nfrom .common import InfoExtractor\nfrom ..aes import aes_cbc_decrypt\nfrom ..compat import (\n compat_HTTPError,\n compat_b64decode,\n compat_ord,\n)\nfrom ..utils import (\n bytes_to_intlist,\n bytes_to_long,\n ExtractorError,\n float_or_none,\n int_or_none,\n intlist_to_bytes,\n long_to_bytes,\n pkcs1pad,\n strip_or_none,\n try_get,\n unified_strdate,\n)\n\n\nclass ADNIE(InfoExtractor):\n IE_DESC = 'Anime Digital Network'\n _VALID_URL = r'https?://(?:www\\.)?animedigitalnetwork\\.fr/video/[^/]+/(?P<id>\\d+)'\n _TEST = {\n 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',\n 'md5': '0319c99885ff5547565cacb4f3f9348d',\n 'info_dict': {\n 'id': '7778',\n 'ext': 'mp4',\n 'title': 'Blue Exorcist - Ky\u00f4to Saga - Episode 1',\n 'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',\n 'series': 'Blue Exorcist - Ky\u00f4to Saga',\n 'duration': 1467,\n 'release_date': '20170106',\n 'comment_count': int,\n 'average_rating': float,\n 'season_number': 2,\n 'episode': 'D\u00e9but des hostilit\u00e9s',\n 'episode_number': 1,\n }\n }\n\n _BASE_URL = 'http://animedigitalnetwork.fr'\n _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'\n _PLAYER_BASE_URL = _API_BASE_URL + 'player/'\n _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)\n _POS_ALIGN_MAP = {\n 'start': 1,\n 'end': 3,\n }\n _LINE_ALIGN_MAP = {\n 'middle': 8,\n 'end': 4,\n }\n\n @staticmethod\n def _ass_subtitles_timecode(seconds):\n return '%01d:%02d:%02d.%02d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 100)\n\n def _get_subtitles(self, sub_url, video_id):\n if not sub_url:\n return None\n\n enc_subtitles = self._download_webpage(\n sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}'\n subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')\n if subtitle_location:\n enc_subtitles = self._download_webpage(\n subtitle_location, video_id, 'Downloading subtitles data',\n fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})\n if not enc_subtitles:\n return None\n\n # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js\n dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(\n bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),\n 
bytes_to_intlist(binascii.unhexlify(self._K + 'ab9f52f5baae7c72')),\n bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))\n ))\n subtitles_json = self._parse_json(\n dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),\n None, fatal=False)\n if not subtitles_json:\n return None\n\n subtitles = {}\n for sub_lang, sub in subtitles_json.items():\n ssa = '''[Script Info]\nScriptType:V4.00\n[V4 Styles]\nFormat: Name,Fontname,Fontsize,PrimaryColour,SecondaryColour,TertiaryColour,BackColour,Bold,Italic,BorderStyle,Outline,Shadow,Alignment,MarginL,MarginR,MarginV,AlphaLevel,Encoding\nStyle: Default,Arial,18,16777215,16777215,16777215,0,-1,0,1,1,0,2,20,20,20,0,0\n[Events]\nFormat: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''\n for current in sub:\n start, end, text, line_align, position_align = (\n float_or_none(current.get('startTime')),\n float_or_none(current.get('endTime')),\n current.get('text'), current.get('lineAlign'),\n current.get('positionAlign'))\n if start is None or end is None or text is None:\n continue\n alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)\n ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (\n self._ass_subtitles_timecode(start),\n self._ass_subtitles_timecode(end),\n '{\\\\a%d}' % alignment if alignment != 2 else '',\n text.replace('\\n', '\\\\N').replace('<i>', '{\\\\i1}').replace('</i>', '{\\\\i0}'))\n\n if sub_lang == 'vostf':\n sub_lang = 'fr'\n subtitles.setdefault(sub_lang, []).extend([{\n 'ext': 'json',\n 'data': json.dumps(sub),\n }, {\n 'ext': 'ssa',\n 'data': ssa,\n }])\n return subtitles\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id\n player = self._download_json(\n video_base_url + 'configuration', video_id,\n 'Downloading player config JSON metadata')['player']\n options = player['options']\n\n user = options['user']\n if not user.get('hasAccess'):\n raise ExtractorError(\n 'This video is only available for paying users', expected=True)\n # self.raise_login_required() # FIXME: Login is not implemented\n\n token = self._download_json(\n user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),\n video_id, 'Downloading access token', headers={\n 'x-player-refresh-token': user['refreshToken']\n }, data=b'')['token']\n\n links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')\n self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])\n message = bytes_to_intlist(json.dumps({\n 'k': self._K,\n 't': token,\n }))\n\n # Sometimes authentication fails for no good reason, retry with\n # a different random padding\n links_data = None\n for _ in range(3):\n padded_message = intlist_to_bytes(pkcs1pad(message, 128))\n n, e = self._RSA_KEY\n encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))\n authorization = base64.b64encode(encrypted_message).decode()\n\n try:\n links_data = self._download_json(\n links_url, video_id, 'Downloading links JSON metadata', headers={\n 'X-Player-Token': authorization\n }, query={\n 'freeWithAds': 'true',\n 'adaptive': 'false',\n 'withMetadata': 'true',\n 'source': 'Web'\n })\n break\n except ExtractorError as e:\n if not isinstance(e.cause, compat_HTTPError):\n raise e\n\n if e.cause.code == 401:\n # This usually goes away with a different random pkcs1pad, so retry\n continue\n\n error = self._parse_json(e.cause.read(), video_id)\n message = error.get('message')\n if 
e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':\n self.raise_geo_restricted(msg=message)\n else:\n raise ExtractorError(message)\n else:\n raise ExtractorError('Giving up retrying')\n\n links = links_data.get('links') or {}\n metas = links_data.get('metadata') or {}\n sub_url = (links.get('subtitles') or {}).get('all')\n video_info = links_data.get('video') or {}\n title = metas['title']\n\n formats = []\n for format_id, qualities in (links.get('streaming') or {}).items():\n if not isinstance(qualities, dict):\n continue\n for quality, load_balancer_url in qualities.items():\n load_balancer_data = self._download_json(\n load_balancer_url, video_id,\n 'Downloading %s %s JSON metadata' % (format_id, quality),\n fatal=False) or {}\n m3u8_url = load_balancer_data.get('location')\n if not m3u8_url:\n continue\n m3u8_formats = self._extract_m3u8_formats(\n m3u8_url, video_id, 'mp4', 'm3u8_native',\n m3u8_id=format_id, fatal=False)\n if format_id == 'vf':\n for f in m3u8_formats:\n f['language'] = 'fr'\n formats.extend(m3u8_formats)\n self._sort_formats(formats)\n\n video = (self._download_json(\n self._API_BASE_URL + 'video/%s' % video_id, video_id,\n 'Downloading additional video metadata', fatal=False) or {}).get('video') or {}\n show = video.get('show') or {}\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': strip_or_none(metas.get('summary') or video.get('summary')),\n 'thumbnail': video_info.get('image') or player.get('image'),\n 'formats': formats,\n 'subtitles': self.extract_subtitles(sub_url, video_id),\n 'episode': metas.get('subtitle') or video.get('name'),\n 'episode_number': int_or_none(video.get('shortNumber')),\n 'series': show.get('title'),\n 'season_number': int_or_none(video.get('season')),\n 'duration': int_or_none(video_info.get('duration') or video.get('duration')),\n 'release_date': unified_strdate(video.get('releaseDate')),\n 'average_rating': float_or_none(video.get('rating') or metas.get('rating')),\n 'comment_count': int_or_none(video.get('commentsCount')),\n }\n", "path": "youtube_dl/extractor/adn.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport base64\nimport binascii\nimport json\nimport os\nimport random\n\nfrom .common import InfoExtractor\nfrom ..aes import aes_cbc_decrypt\nfrom ..compat import (\n compat_HTTPError,\n compat_b64decode,\n compat_ord,\n)\nfrom ..utils import (\n bytes_to_intlist,\n bytes_to_long,\n ExtractorError,\n float_or_none,\n int_or_none,\n intlist_to_bytes,\n long_to_bytes,\n pkcs1pad,\n strip_or_none,\n try_get,\n unified_strdate,\n urlencode_postdata,\n)\n\n\nclass ADNIE(InfoExtractor):\n IE_DESC = 'Anime Digital Network'\n _VALID_URL = r'https?://(?:www\\.)?animedigitalnetwork\\.fr/video/[^/]+/(?P<id>\\d+)'\n _TEST = {\n 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',\n 'md5': '0319c99885ff5547565cacb4f3f9348d',\n 'info_dict': {\n 'id': '7778',\n 'ext': 'mp4',\n 'title': 'Blue Exorcist - Ky\u00f4to Saga - Episode 1',\n 'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',\n 'series': 'Blue Exorcist - Ky\u00f4to Saga',\n 'duration': 1467,\n 'release_date': '20170106',\n 'comment_count': int,\n 'average_rating': float,\n 'season_number': 2,\n 'episode': 'D\u00e9but des hostilit\u00e9s',\n 'episode_number': 1,\n }\n }\n\n _NETRC_MACHINE = 'animedigitalnetwork'\n _BASE_URL = 'http://animedigitalnetwork.fr'\n _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'\n _PLAYER_BASE_URL = 
_API_BASE_URL + 'player/'\n _HEADERS = {}\n _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)\n _POS_ALIGN_MAP = {\n 'start': 1,\n 'end': 3,\n }\n _LINE_ALIGN_MAP = {\n 'middle': 8,\n 'end': 4,\n }\n\n @staticmethod\n def _ass_subtitles_timecode(seconds):\n return '%01d:%02d:%02d.%02d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 100)\n\n def _get_subtitles(self, sub_url, video_id):\n if not sub_url:\n return None\n\n enc_subtitles = self._download_webpage(\n sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}'\n subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')\n if subtitle_location:\n enc_subtitles = self._download_webpage(\n subtitle_location, video_id, 'Downloading subtitles data',\n fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})\n if not enc_subtitles:\n return None\n\n # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js\n dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(\n bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),\n bytes_to_intlist(binascii.unhexlify(self._K + 'ab9f52f5baae7c72')),\n bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))\n ))\n subtitles_json = self._parse_json(\n dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),\n None, fatal=False)\n if not subtitles_json:\n return None\n\n subtitles = {}\n for sub_lang, sub in subtitles_json.items():\n ssa = '''[Script Info]\nScriptType:V4.00\n[V4 Styles]\nFormat: Name,Fontname,Fontsize,PrimaryColour,SecondaryColour,TertiaryColour,BackColour,Bold,Italic,BorderStyle,Outline,Shadow,Alignment,MarginL,MarginR,MarginV,AlphaLevel,Encoding\nStyle: Default,Arial,18,16777215,16777215,16777215,0,-1,0,1,1,0,2,20,20,20,0,0\n[Events]\nFormat: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''\n for current in sub:\n start, end, text, line_align, position_align = (\n float_or_none(current.get('startTime')),\n float_or_none(current.get('endTime')),\n current.get('text'), current.get('lineAlign'),\n current.get('positionAlign'))\n if start is None or end is None or text is None:\n continue\n alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)\n ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (\n self._ass_subtitles_timecode(start),\n self._ass_subtitles_timecode(end),\n '{\\\\a%d}' % alignment if alignment != 2 else '',\n text.replace('\\n', '\\\\N').replace('<i>', '{\\\\i1}').replace('</i>', '{\\\\i0}'))\n\n if sub_lang == 'vostf':\n sub_lang = 'fr'\n subtitles.setdefault(sub_lang, []).extend([{\n 'ext': 'json',\n 'data': json.dumps(sub),\n }, {\n 'ext': 'ssa',\n 'data': ssa,\n }])\n return subtitles\n\n def _real_initialize(self):\n username, password = self._get_login_info()\n if username:\n access_token = (self._download_json(\n self._API_BASE_URL + 'authentication/login', None,\n 'Logging in', errnote='Unable to log in', fatal=False,\n data=urlencode_postdata({\n 'password': password,\n 'rememberMe': False,\n 'source': 'Web',\n 'username': username,\n })) or {}).get('accessToken')\n if access_token:\n self._HEADERS = {'authorization': 'Bearer ' + access_token}\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n video_base_url = self._PLAYER_BASE_URL + 
'video/%s/' % video_id\n player = self._download_json(\n video_base_url + 'configuration', video_id,\n 'Downloading player config JSON metadata', headers=self._HEADERS)['player']\n options = player['options']\n\n user = options['user']\n if not user.get('hasAccess'):\n self.raise_login_required()\n\n token = self._download_json(\n user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),\n video_id, 'Downloading access token', headers={\n 'x-player-refresh-token': user['refreshToken']\n }, data=b'')['token']\n\n links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')\n self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])\n message = bytes_to_intlist(json.dumps({\n 'k': self._K,\n 't': token,\n }))\n\n # Sometimes authentication fails for no good reason, retry with\n # a different random padding\n links_data = None\n for _ in range(3):\n padded_message = intlist_to_bytes(pkcs1pad(message, 128))\n n, e = self._RSA_KEY\n encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))\n authorization = base64.b64encode(encrypted_message).decode()\n\n try:\n links_data = self._download_json(\n links_url, video_id, 'Downloading links JSON metadata', headers={\n 'X-Player-Token': authorization\n }, query={\n 'freeWithAds': 'true',\n 'adaptive': 'false',\n 'withMetadata': 'true',\n 'source': 'Web'\n })\n break\n except ExtractorError as e:\n if not isinstance(e.cause, compat_HTTPError):\n raise e\n\n if e.cause.code == 401:\n # This usually goes away with a different random pkcs1pad, so retry\n continue\n\n error = self._parse_json(e.cause.read(), video_id)\n message = error.get('message')\n if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':\n self.raise_geo_restricted(msg=message)\n else:\n raise ExtractorError(message)\n else:\n raise ExtractorError('Giving up retrying')\n\n links = links_data.get('links') or {}\n metas = links_data.get('metadata') or {}\n sub_url = (links.get('subtitles') or {}).get('all')\n video_info = links_data.get('video') or {}\n title = metas['title']\n\n formats = []\n for format_id, qualities in (links.get('streaming') or {}).items():\n if not isinstance(qualities, dict):\n continue\n for quality, load_balancer_url in qualities.items():\n load_balancer_data = self._download_json(\n load_balancer_url, video_id,\n 'Downloading %s %s JSON metadata' % (format_id, quality),\n fatal=False) or {}\n m3u8_url = load_balancer_data.get('location')\n if not m3u8_url:\n continue\n m3u8_formats = self._extract_m3u8_formats(\n m3u8_url, video_id, 'mp4', 'm3u8_native',\n m3u8_id=format_id, fatal=False)\n if format_id == 'vf':\n for f in m3u8_formats:\n f['language'] = 'fr'\n formats.extend(m3u8_formats)\n self._sort_formats(formats)\n\n video = (self._download_json(\n self._API_BASE_URL + 'video/%s' % video_id, video_id,\n 'Downloading additional video metadata', fatal=False) or {}).get('video') or {}\n show = video.get('show') or {}\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': strip_or_none(metas.get('summary') or video.get('summary')),\n 'thumbnail': video_info.get('image') or player.get('image'),\n 'formats': formats,\n 'subtitles': self.extract_subtitles(sub_url, video_id),\n 'episode': metas.get('subtitle') or video.get('name'),\n 'episode_number': int_or_none(video.get('shortNumber')),\n 'series': show.get('title'),\n 'season_number': int_or_none(video.get('season')),\n 'duration': int_or_none(video_info.get('duration') or video.get('duration')),\n 
'release_date': unified_strdate(video.get('releaseDate')),\n 'average_rating': float_or_none(video.get('rating') or metas.get('rating')),\n 'comment_count': int_or_none(video.get('commentsCount')),\n }\n", "path": "youtube_dl/extractor/adn.py"}]}
| 4,019 | 762 |
gh_patches_debug_42690
|
rasdani/github-patches
|
git_diff
|
openai__gym-2070
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
flattened point dtype does not match flattened space dtype
I originally found this by using the FlattenObservationWrapper and seeing that my flattened space yielded points of a different precision than the original space. The bug actually originates from the `spaces.utils.flatten_space` function and is caused by some missing dtype arguments. 
Simplified examples of the bug:
```
from gym.spaces import utils
from gym.spaces import MultiDiscrete, MultiBinary
original_space = MultiDiscrete([2, 2, 100])
flattened_space = utils.flatten_space(original_space)
original_sample = original_space.sample()
flattened_sample = utils.flatten(original_space, original_sample)
assert flattened_space.dtype == flattened_sample.dtype
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/utils.py`
Content:
```
1 from collections import OrderedDict
2 import numpy as np
3
4 from gym.spaces import Box
5 from gym.spaces import Discrete
6 from gym.spaces import MultiDiscrete
7 from gym.spaces import MultiBinary
8 from gym.spaces import Tuple
9 from gym.spaces import Dict
10
11
12 def flatdim(space):
13 """Return the number of dimensions a flattened equivalent of this space
14 would have.
15
16 Accepts a space and returns an integer. Raises ``NotImplementedError`` if
17 the space is not defined in ``gym.spaces``.
18 """
19 if isinstance(space, Box):
20 return int(np.prod(space.shape))
21 elif isinstance(space, Discrete):
22 return int(space.n)
23 elif isinstance(space, Tuple):
24 return int(sum([flatdim(s) for s in space.spaces]))
25 elif isinstance(space, Dict):
26 return int(sum([flatdim(s) for s in space.spaces.values()]))
27 elif isinstance(space, MultiBinary):
28 return int(space.n)
29 elif isinstance(space, MultiDiscrete):
30 return int(np.prod(space.shape))
31 else:
32 raise NotImplementedError
33
34
35 def flatten(space, x):
36 """Flatten a data point from a space.
37
38 This is useful when e.g. points from spaces must be passed to a neural
39 network, which only understands flat arrays of floats.
40
41 Accepts a space and a point from that space. Always returns a 1D array.
42 Raises ``NotImplementedError`` if the space is not defined in
43 ``gym.spaces``.
44 """
45 if isinstance(space, Box):
46 return np.asarray(x, dtype=np.float32).flatten()
47 elif isinstance(space, Discrete):
48 onehot = np.zeros(space.n, dtype=np.float32)
49 onehot[x] = 1.0
50 return onehot
51 elif isinstance(space, Tuple):
52 return np.concatenate(
53 [flatten(s, x_part) for x_part, s in zip(x, space.spaces)])
54 elif isinstance(space, Dict):
55 return np.concatenate(
56 [flatten(s, x[key]) for key, s in space.spaces.items()])
57 elif isinstance(space, MultiBinary):
58 return np.asarray(x).flatten()
59 elif isinstance(space, MultiDiscrete):
60 return np.asarray(x).flatten()
61 else:
62 raise NotImplementedError
63
64
65 def unflatten(space, x):
66 """Unflatten a data point from a space.
67
68 This reverses the transformation applied by ``flatten()``. You must ensure
69 that the ``space`` argument is the same as for the ``flatten()`` call.
70
71 Accepts a space and a flattened point. Returns a point with a structure
72 that matches the space. Raises ``NotImplementedError`` if the space is not
73 defined in ``gym.spaces``.
74 """
75 if isinstance(space, Box):
76 return np.asarray(x, dtype=np.float32).reshape(space.shape)
77 elif isinstance(space, Discrete):
78 return int(np.nonzero(x)[0][0])
79 elif isinstance(space, Tuple):
80 dims = [flatdim(s) for s in space.spaces]
81 list_flattened = np.split(x, np.cumsum(dims)[:-1])
82 list_unflattened = [
83 unflatten(s, flattened)
84 for flattened, s in zip(list_flattened, space.spaces)
85 ]
86 return tuple(list_unflattened)
87 elif isinstance(space, Dict):
88 dims = [flatdim(s) for s in space.spaces.values()]
89 list_flattened = np.split(x, np.cumsum(dims)[:-1])
90 list_unflattened = [
91 (key, unflatten(s, flattened))
92 for flattened, (key,
93 s) in zip(list_flattened, space.spaces.items())
94 ]
95 return OrderedDict(list_unflattened)
96 elif isinstance(space, MultiBinary):
97 return np.asarray(x).reshape(space.shape)
98 elif isinstance(space, MultiDiscrete):
99 return np.asarray(x).reshape(space.shape)
100 else:
101 raise NotImplementedError
102
103
104 def flatten_space(space):
105 """Flatten a space into a single ``Box``.
106
107 This is equivalent to ``flatten()``, but operates on the space itself. The
108 result always is a `Box` with flat boundaries. The box has exactly
109 ``flatdim(space)`` dimensions. Flattening a sample of the original space
110 has the same effect as taking a sample of the flattenend space.
111
112 Raises ``NotImplementedError`` if the space is not defined in
113 ``gym.spaces``.
114
115 Example::
116
117 >>> box = Box(0.0, 1.0, shape=(3, 4, 5))
118 >>> box
119 Box(3, 4, 5)
120 >>> flatten_space(box)
121 Box(60,)
122 >>> flatten(box, box.sample()) in flatten_space(box)
123 True
124
125 Example that flattens a discrete space::
126
127 >>> discrete = Discrete(5)
128 >>> flatten_space(discrete)
129 Box(5,)
130 >>> flatten(box, box.sample()) in flatten_space(box)
131 True
132
133 Example that recursively flattens a dict::
134
135 >>> space = Dict({"position": Discrete(2),
136 ... "velocity": Box(0, 1, shape=(2, 2))})
137 >>> flatten_space(space)
138 Box(6,)
139 >>> flatten(space, space.sample()) in flatten_space(space)
140 True
141 """
142 if isinstance(space, Box):
143 return Box(space.low.flatten(), space.high.flatten())
144 if isinstance(space, Discrete):
145 return Box(low=0, high=1, shape=(space.n, ))
146 if isinstance(space, Tuple):
147 space = [flatten_space(s) for s in space.spaces]
148 return Box(
149 low=np.concatenate([s.low for s in space]),
150 high=np.concatenate([s.high for s in space]),
151 )
152 if isinstance(space, Dict):
153 space = [flatten_space(s) for s in space.spaces.values()]
154 return Box(
155 low=np.concatenate([s.low for s in space]),
156 high=np.concatenate([s.high for s in space]),
157 )
158 if isinstance(space, MultiBinary):
159 return Box(low=0, high=1, shape=(space.n, ))
160 if isinstance(space, MultiDiscrete):
161 return Box(
162 low=np.zeros_like(space.nvec),
163 high=space.nvec,
164 )
165 raise NotImplementedError
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/spaces/utils.py b/gym/spaces/utils.py
--- a/gym/spaces/utils.py
+++ b/gym/spaces/utils.py
@@ -43,10 +43,10 @@
``gym.spaces``.
"""
if isinstance(space, Box):
- return np.asarray(x, dtype=np.float32).flatten()
+ return np.asarray(x, dtype=space.dtype).flatten()
elif isinstance(space, Discrete):
- onehot = np.zeros(space.n, dtype=np.float32)
- onehot[x] = 1.0
+ onehot = np.zeros(space.n, dtype=space.dtype)
+ onehot[x] = 1
return onehot
elif isinstance(space, Tuple):
return np.concatenate(
@@ -55,9 +55,9 @@
return np.concatenate(
[flatten(s, x[key]) for key, s in space.spaces.items()])
elif isinstance(space, MultiBinary):
- return np.asarray(x).flatten()
+ return np.asarray(x, dtype=space.dtype).flatten()
elif isinstance(space, MultiDiscrete):
- return np.asarray(x).flatten()
+ return np.asarray(x, dtype=space.dtype).flatten()
else:
raise NotImplementedError
@@ -73,7 +73,7 @@
defined in ``gym.spaces``.
"""
if isinstance(space, Box):
- return np.asarray(x, dtype=np.float32).reshape(space.shape)
+ return np.asarray(x, dtype=space.dtype).reshape(space.shape)
elif isinstance(space, Discrete):
return int(np.nonzero(x)[0][0])
elif isinstance(space, Tuple):
@@ -94,9 +94,9 @@
]
return OrderedDict(list_unflattened)
elif isinstance(space, MultiBinary):
- return np.asarray(x).reshape(space.shape)
+ return np.asarray(x, dtype=space.dtype).reshape(space.shape)
elif isinstance(space, MultiDiscrete):
- return np.asarray(x).reshape(space.shape)
+ return np.asarray(x, dtype=space.dtype).reshape(space.shape)
else:
raise NotImplementedError
@@ -140,26 +140,33 @@
True
"""
if isinstance(space, Box):
- return Box(space.low.flatten(), space.high.flatten())
+ return Box(space.low.flatten(), space.high.flatten(), dtype=space.dtype)
if isinstance(space, Discrete):
- return Box(low=0, high=1, shape=(space.n, ))
+ return Box(low=0, high=1, shape=(space.n, ), dtype=space.dtype)
if isinstance(space, Tuple):
space = [flatten_space(s) for s in space.spaces]
return Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
+ dtype=np.result_type(*[s.dtype for s in space])
)
if isinstance(space, Dict):
space = [flatten_space(s) for s in space.spaces.values()]
return Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
+ dtype=np.result_type(*[s.dtype for s in space])
)
if isinstance(space, MultiBinary):
- return Box(low=0, high=1, shape=(space.n, ))
+ return Box(low=0,
+ high=1,
+ shape=(space.n, ),
+ dtype=space.dtype
+ )
if isinstance(space, MultiDiscrete):
return Box(
low=np.zeros_like(space.nvec),
high=space.nvec,
+ dtype=space.dtype
)
raise NotImplementedError
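One detail in the hunks above worth calling out: Tuple and Dict spaces have no single subspace dtype to reuse, so the patch lets NumPy choose a common one with np.result_type, i.e. ordinary promotion rules applied across the flattened subspaces. A tiny stand-alone illustration (plain NumPy, independent of gym):
```python
import numpy as np

# Integer dtypes promote to the widest integer involved...
print(np.result_type(np.int8, np.int64))              # int64
# ...and mixing in a float32 pushes the common type up to float64,
# since float32 cannot represent every int64 exactly.
print(np.result_type(np.int8, np.int64, np.float32))  # float64
```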
|
{"golden_diff": "diff --git a/gym/spaces/utils.py b/gym/spaces/utils.py\n--- a/gym/spaces/utils.py\n+++ b/gym/spaces/utils.py\n@@ -43,10 +43,10 @@\n ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n- return np.asarray(x, dtype=np.float32).flatten()\n+ return np.asarray(x, dtype=space.dtype).flatten()\n elif isinstance(space, Discrete):\n- onehot = np.zeros(space.n, dtype=np.float32)\n- onehot[x] = 1.0\n+ onehot = np.zeros(space.n, dtype=space.dtype)\n+ onehot[x] = 1\n return onehot\n elif isinstance(space, Tuple):\n return np.concatenate(\n@@ -55,9 +55,9 @@\n return np.concatenate(\n [flatten(s, x[key]) for key, s in space.spaces.items()])\n elif isinstance(space, MultiBinary):\n- return np.asarray(x).flatten()\n+ return np.asarray(x, dtype=space.dtype).flatten()\n elif isinstance(space, MultiDiscrete):\n- return np.asarray(x).flatten()\n+ return np.asarray(x, dtype=space.dtype).flatten()\n else:\n raise NotImplementedError\n \n@@ -73,7 +73,7 @@\n defined in ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n- return np.asarray(x, dtype=np.float32).reshape(space.shape)\n+ return np.asarray(x, dtype=space.dtype).reshape(space.shape)\n elif isinstance(space, Discrete):\n return int(np.nonzero(x)[0][0])\n elif isinstance(space, Tuple):\n@@ -94,9 +94,9 @@\n ]\n return OrderedDict(list_unflattened)\n elif isinstance(space, MultiBinary):\n- return np.asarray(x).reshape(space.shape)\n+ return np.asarray(x, dtype=space.dtype).reshape(space.shape)\n elif isinstance(space, MultiDiscrete):\n- return np.asarray(x).reshape(space.shape)\n+ return np.asarray(x, dtype=space.dtype).reshape(space.shape)\n else:\n raise NotImplementedError\n \n@@ -140,26 +140,33 @@\n True\n \"\"\"\n if isinstance(space, Box):\n- return Box(space.low.flatten(), space.high.flatten())\n+ return Box(space.low.flatten(), space.high.flatten(), dtype=space.dtype)\n if isinstance(space, Discrete):\n- return Box(low=0, high=1, shape=(space.n, ))\n+ return Box(low=0, high=1, shape=(space.n, ), dtype=space.dtype)\n if isinstance(space, Tuple):\n space = [flatten_space(s) for s in space.spaces]\n return Box(\n low=np.concatenate([s.low for s in space]),\n high=np.concatenate([s.high for s in space]),\n+ dtype=np.result_type(*[s.dtype for s in space])\n )\n if isinstance(space, Dict):\n space = [flatten_space(s) for s in space.spaces.values()]\n return Box(\n low=np.concatenate([s.low for s in space]),\n high=np.concatenate([s.high for s in space]),\n+ dtype=np.result_type(*[s.dtype for s in space])\n )\n if isinstance(space, MultiBinary):\n- return Box(low=0, high=1, shape=(space.n, ))\n+ return Box(low=0,\n+ high=1,\n+ shape=(space.n, ),\n+ dtype=space.dtype\n+ )\n if isinstance(space, MultiDiscrete):\n return Box(\n low=np.zeros_like(space.nvec),\n high=space.nvec,\n+ dtype=space.dtype\n )\n raise NotImplementedError\n", "issue": "flattened point dtype does not match flattened space dtype\nOriginally found this by using the FlattenObservationWrapper and seeing that my flattened space yielded different precision points than the original space. This is a bug that actually originates from the `spaces.utils.flatten_space` function and is caused by some missing dtype arguments. 
\r\n\r\nSimplified examples of the bug:\r\n```\r\nfrom gym.spaces import utils\r\nfrom gym.spaces import MultiDiscrete, MultiBinary\r\n\r\noriginal_space = MultiDiscrete([2, 2, 100])\r\nflattened_space = utils.flatten_space(original_space)\r\n\r\noriginal_sample = original_space.sample()\r\nflattened_sample = utils.flatten(original_space, original_sample)\r\n\r\nassert flattened_space.dtype == flattened_sample.dtype\r\n```\n", "before_files": [{"content": "from collections import OrderedDict\nimport numpy as np\n\nfrom gym.spaces import Box\nfrom gym.spaces import Discrete\nfrom gym.spaces import MultiDiscrete\nfrom gym.spaces import MultiBinary\nfrom gym.spaces import Tuple\nfrom gym.spaces import Dict\n\n\ndef flatdim(space):\n \"\"\"Return the number of dimensions a flattened equivalent of this space\n would have.\n\n Accepts a space and returns an integer. Raises ``NotImplementedError`` if\n the space is not defined in ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n return int(np.prod(space.shape))\n elif isinstance(space, Discrete):\n return int(space.n)\n elif isinstance(space, Tuple):\n return int(sum([flatdim(s) for s in space.spaces]))\n elif isinstance(space, Dict):\n return int(sum([flatdim(s) for s in space.spaces.values()]))\n elif isinstance(space, MultiBinary):\n return int(space.n)\n elif isinstance(space, MultiDiscrete):\n return int(np.prod(space.shape))\n else:\n raise NotImplementedError\n\n\ndef flatten(space, x):\n \"\"\"Flatten a data point from a space.\n\n This is useful when e.g. points from spaces must be passed to a neural\n network, which only understands flat arrays of floats.\n\n Accepts a space and a point from that space. Always returns a 1D array.\n Raises ``NotImplementedError`` if the space is not defined in\n ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n return np.asarray(x, dtype=np.float32).flatten()\n elif isinstance(space, Discrete):\n onehot = np.zeros(space.n, dtype=np.float32)\n onehot[x] = 1.0\n return onehot\n elif isinstance(space, Tuple):\n return np.concatenate(\n [flatten(s, x_part) for x_part, s in zip(x, space.spaces)])\n elif isinstance(space, Dict):\n return np.concatenate(\n [flatten(s, x[key]) for key, s in space.spaces.items()])\n elif isinstance(space, MultiBinary):\n return np.asarray(x).flatten()\n elif isinstance(space, MultiDiscrete):\n return np.asarray(x).flatten()\n else:\n raise NotImplementedError\n\n\ndef unflatten(space, x):\n \"\"\"Unflatten a data point from a space.\n\n This reverses the transformation applied by ``flatten()``. You must ensure\n that the ``space`` argument is the same as for the ``flatten()`` call.\n\n Accepts a space and a flattened point. Returns a point with a structure\n that matches the space. 
Raises ``NotImplementedError`` if the space is not\n defined in ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n return np.asarray(x, dtype=np.float32).reshape(space.shape)\n elif isinstance(space, Discrete):\n return int(np.nonzero(x)[0][0])\n elif isinstance(space, Tuple):\n dims = [flatdim(s) for s in space.spaces]\n list_flattened = np.split(x, np.cumsum(dims)[:-1])\n list_unflattened = [\n unflatten(s, flattened)\n for flattened, s in zip(list_flattened, space.spaces)\n ]\n return tuple(list_unflattened)\n elif isinstance(space, Dict):\n dims = [flatdim(s) for s in space.spaces.values()]\n list_flattened = np.split(x, np.cumsum(dims)[:-1])\n list_unflattened = [\n (key, unflatten(s, flattened))\n for flattened, (key,\n s) in zip(list_flattened, space.spaces.items())\n ]\n return OrderedDict(list_unflattened)\n elif isinstance(space, MultiBinary):\n return np.asarray(x).reshape(space.shape)\n elif isinstance(space, MultiDiscrete):\n return np.asarray(x).reshape(space.shape)\n else:\n raise NotImplementedError\n\n\ndef flatten_space(space):\n \"\"\"Flatten a space into a single ``Box``.\n\n This is equivalent to ``flatten()``, but operates on the space itself. The\n result always is a `Box` with flat boundaries. The box has exactly\n ``flatdim(space)`` dimensions. Flattening a sample of the original space\n has the same effect as taking a sample of the flattenend space.\n\n Raises ``NotImplementedError`` if the space is not defined in\n ``gym.spaces``.\n\n Example::\n\n >>> box = Box(0.0, 1.0, shape=(3, 4, 5))\n >>> box\n Box(3, 4, 5)\n >>> flatten_space(box)\n Box(60,)\n >>> flatten(box, box.sample()) in flatten_space(box)\n True\n\n Example that flattens a discrete space::\n\n >>> discrete = Discrete(5)\n >>> flatten_space(discrete)\n Box(5,)\n >>> flatten(box, box.sample()) in flatten_space(box)\n True\n\n Example that recursively flattens a dict::\n\n >>> space = Dict({\"position\": Discrete(2),\n ... \"velocity\": Box(0, 1, shape=(2, 2))})\n >>> flatten_space(space)\n Box(6,)\n >>> flatten(space, space.sample()) in flatten_space(space)\n True\n \"\"\"\n if isinstance(space, Box):\n return Box(space.low.flatten(), space.high.flatten())\n if isinstance(space, Discrete):\n return Box(low=0, high=1, shape=(space.n, ))\n if isinstance(space, Tuple):\n space = [flatten_space(s) for s in space.spaces]\n return Box(\n low=np.concatenate([s.low for s in space]),\n high=np.concatenate([s.high for s in space]),\n )\n if isinstance(space, Dict):\n space = [flatten_space(s) for s in space.spaces.values()]\n return Box(\n low=np.concatenate([s.low for s in space]),\n high=np.concatenate([s.high for s in space]),\n )\n if isinstance(space, MultiBinary):\n return Box(low=0, high=1, shape=(space.n, ))\n if isinstance(space, MultiDiscrete):\n return Box(\n low=np.zeros_like(space.nvec),\n high=space.nvec,\n )\n raise NotImplementedError\n", "path": "gym/spaces/utils.py"}], "after_files": [{"content": "from collections import OrderedDict\nimport numpy as np\n\nfrom gym.spaces import Box\nfrom gym.spaces import Discrete\nfrom gym.spaces import MultiDiscrete\nfrom gym.spaces import MultiBinary\nfrom gym.spaces import Tuple\nfrom gym.spaces import Dict\n\n\ndef flatdim(space):\n \"\"\"Return the number of dimensions a flattened equivalent of this space\n would have.\n\n Accepts a space and returns an integer. 
Raises ``NotImplementedError`` if\n the space is not defined in ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n return int(np.prod(space.shape))\n elif isinstance(space, Discrete):\n return int(space.n)\n elif isinstance(space, Tuple):\n return int(sum([flatdim(s) for s in space.spaces]))\n elif isinstance(space, Dict):\n return int(sum([flatdim(s) for s in space.spaces.values()]))\n elif isinstance(space, MultiBinary):\n return int(space.n)\n elif isinstance(space, MultiDiscrete):\n return int(np.prod(space.shape))\n else:\n raise NotImplementedError\n\n\ndef flatten(space, x):\n \"\"\"Flatten a data point from a space.\n\n This is useful when e.g. points from spaces must be passed to a neural\n network, which only understands flat arrays of floats.\n\n Accepts a space and a point from that space. Always returns a 1D array.\n Raises ``NotImplementedError`` if the space is not defined in\n ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n return np.asarray(x, dtype=space.dtype).flatten()\n elif isinstance(space, Discrete):\n onehot = np.zeros(space.n, dtype=space.dtype)\n onehot[x] = 1\n return onehot\n elif isinstance(space, Tuple):\n return np.concatenate(\n [flatten(s, x_part) for x_part, s in zip(x, space.spaces)])\n elif isinstance(space, Dict):\n return np.concatenate(\n [flatten(s, x[key]) for key, s in space.spaces.items()])\n elif isinstance(space, MultiBinary):\n return np.asarray(x, dtype=space.dtype).flatten()\n elif isinstance(space, MultiDiscrete):\n return np.asarray(x, dtype=space.dtype).flatten()\n else:\n raise NotImplementedError\n\n\ndef unflatten(space, x):\n \"\"\"Unflatten a data point from a space.\n\n This reverses the transformation applied by ``flatten()``. You must ensure\n that the ``space`` argument is the same as for the ``flatten()`` call.\n\n Accepts a space and a flattened point. Returns a point with a structure\n that matches the space. Raises ``NotImplementedError`` if the space is not\n defined in ``gym.spaces``.\n \"\"\"\n if isinstance(space, Box):\n return np.asarray(x, dtype=space.dtype).reshape(space.shape)\n elif isinstance(space, Discrete):\n return int(np.nonzero(x)[0][0])\n elif isinstance(space, Tuple):\n dims = [flatdim(s) for s in space.spaces]\n list_flattened = np.split(x, np.cumsum(dims)[:-1])\n list_unflattened = [\n unflatten(s, flattened)\n for flattened, s in zip(list_flattened, space.spaces)\n ]\n return tuple(list_unflattened)\n elif isinstance(space, Dict):\n dims = [flatdim(s) for s in space.spaces.values()]\n list_flattened = np.split(x, np.cumsum(dims)[:-1])\n list_unflattened = [\n (key, unflatten(s, flattened))\n for flattened, (key,\n s) in zip(list_flattened, space.spaces.items())\n ]\n return OrderedDict(list_unflattened)\n elif isinstance(space, MultiBinary):\n return np.asarray(x, dtype=space.dtype).reshape(space.shape)\n elif isinstance(space, MultiDiscrete):\n return np.asarray(x, dtype=space.dtype).reshape(space.shape)\n else:\n raise NotImplementedError\n\n\ndef flatten_space(space):\n \"\"\"Flatten a space into a single ``Box``.\n\n This is equivalent to ``flatten()``, but operates on the space itself. The\n result always is a `Box` with flat boundaries. The box has exactly\n ``flatdim(space)`` dimensions. 
Flattening a sample of the original space\n has the same effect as taking a sample of the flattenend space.\n\n Raises ``NotImplementedError`` if the space is not defined in\n ``gym.spaces``.\n\n Example::\n\n >>> box = Box(0.0, 1.0, shape=(3, 4, 5))\n >>> box\n Box(3, 4, 5)\n >>> flatten_space(box)\n Box(60,)\n >>> flatten(box, box.sample()) in flatten_space(box)\n True\n\n Example that flattens a discrete space::\n\n >>> discrete = Discrete(5)\n >>> flatten_space(discrete)\n Box(5,)\n >>> flatten(box, box.sample()) in flatten_space(box)\n True\n\n Example that recursively flattens a dict::\n\n >>> space = Dict({\"position\": Discrete(2),\n ... \"velocity\": Box(0, 1, shape=(2, 2))})\n >>> flatten_space(space)\n Box(6,)\n >>> flatten(space, space.sample()) in flatten_space(space)\n True\n \"\"\"\n if isinstance(space, Box):\n return Box(space.low.flatten(), space.high.flatten(), dtype=space.dtype)\n if isinstance(space, Discrete):\n return Box(low=0, high=1, shape=(space.n, ), dtype=space.dtype)\n if isinstance(space, Tuple):\n space = [flatten_space(s) for s in space.spaces]\n return Box(\n low=np.concatenate([s.low for s in space]),\n high=np.concatenate([s.high for s in space]),\n dtype=np.result_type(*[s.dtype for s in space])\n )\n if isinstance(space, Dict):\n space = [flatten_space(s) for s in space.spaces.values()]\n return Box(\n low=np.concatenate([s.low for s in space]),\n high=np.concatenate([s.high for s in space]),\n dtype=np.result_type(*[s.dtype for s in space])\n )\n if isinstance(space, MultiBinary):\n return Box(low=0,\n high=1,\n shape=(space.n, ),\n dtype=space.dtype\n )\n if isinstance(space, MultiDiscrete):\n return Box(\n low=np.zeros_like(space.nvec),\n high=space.nvec,\n dtype=space.dtype\n )\n raise NotImplementedError\n", "path": "gym/spaces/utils.py"}]}
| 2,175 | 812 |
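As a quick regression check for this record, the snippet from the issue generalises to the other space types touched by the patch; with the diff applied, every assertion below should pass. This is an untested sketch that simply follows the same pattern as the issue's own example.
```python
import numpy as np
from gym.spaces import utils, Box, Discrete, MultiDiscrete, MultiBinary

for space in (Box(0.0, 1.0, shape=(3, 4)), Discrete(5),
              MultiDiscrete([2, 2, 100]), MultiBinary(6)):
    sample = space.sample()
    flat_space = utils.flatten_space(space)
    flat_sample = utils.flatten(space, sample)
    # The flattened point's dtype should now match the flattened space.
    assert flat_space.dtype == flat_sample.dtype, space
    # Unflattening should still round-trip the original sample.
    assert np.array_equal(utils.unflatten(space, flat_sample), sample), space
```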
gh_patches_debug_19904
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-6771
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New bandit failures
```
Test results:
>> Issue: [B113:request_without_timeout] Requests call without timeout
Severity: Medium Confidence: Low
CWE: CWE-400 (https://cwe.mitre.org/data/definitions/400.html)
More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b113_request_without_timeout.html
Location: ./admin/tests/test_integration.py:591:21
590 )
591 latest_release = requests.get(github_url).json()
592 latest_tag = str(latest_release["tag_name"])
--------------------------------------------------
>> Issue: [B113:request_without_timeout] Requests call without timeout
Severity: Medium Confidence: Low
CWE: CWE-400 (https://cwe.mitre.org/data/definitions/400.html)
More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b113_request_without_timeout.html
Location: ./admin/tests/test_integration.py:664:21
663 )
664 latest_release = requests.get(github_url).json()
665 latest_tag = str(latest_release["tag_name"])
--------------------------------------------------
>> Issue: [B608:hardcoded_sql_expressions] Possible SQL injection vector through string-based query construction.
Severity: Medium Confidence: Medium
CWE: CWE-89 (https://cwe.mitre.org/data/definitions/89.html)
More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b608_hardcoded_sql_expressions.html
Location: ./securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py:75:12
74 result = conn.execute( # nosec
75 f"SELECT 1 FROM {table} WHERE journalist_id IS NULL;"
76 ).first()
--------------------------------------------------
>> Issue: [B608:hardcoded_sql_expressions] Possible SQL injection vector through string-based query construction.
Severity: Medium Confidence: Low
CWE: CWE-89 (https://cwe.mitre.org/data/definitions/89.html)
More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b608_hardcoded_sql_expressions.html
Location: ./securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py:92:16
91 sa.text(
92 f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id "
93 "WHERE journalist_id IS NULL;"
94 ).bindparams(journalist_id=deleted_id)
--------------------------------------------------
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py`
Content:
```
1 """make journalist_id non-nullable
2
3 Revision ID: 2e24fc7536e8
4 Revises: de00920916bf
5 Create Date: 2022-01-12 19:31:06.186285
6
7 """
8 import os
9 import uuid
10
11 import argon2
12 import sqlalchemy as sa
13 import two_factor
14 from alembic import op
15
16 # raise the errors if we're not in production
17 raise_errors = os.environ.get("SECUREDROP_ENV", "prod") != "prod"
18
19 try:
20 from models import ARGON2_PARAMS
21 from passphrases import PassphraseGenerator
22 except: # noqa
23 if raise_errors:
24 raise
25
26
27 # revision identifiers, used by Alembic.
28 revision = "2e24fc7536e8"
29 down_revision = "de00920916bf"
30 branch_labels = None
31 depends_on = None
32
33
34 def generate_passphrase_hash() -> str:
35 passphrase = PassphraseGenerator.get_default().generate_passphrase()
36 return argon2.PasswordHasher(**ARGON2_PARAMS).hash(passphrase)
37
38
39 def create_deleted() -> int:
40 """manually insert a "deleted" journalist user.
41
42 We need to do it this way since the model will reflect the current state of
43 the schema, not what it is at the current migration step
44
45 It should be basically identical to what Journalist.get_deleted() does
46 """
47 op.execute(
48 sa.text(
49 """\
50 INSERT INTO journalists (uuid, username, session_nonce, passphrase_hash, otp_secret)
51 VALUES (:uuid, "deleted", 0, :passphrase_hash, :otp_secret);
52 """
53 ).bindparams(
54 uuid=str(uuid.uuid4()),
55 passphrase_hash=generate_passphrase_hash(),
56 otp_secret=two_factor.random_base32(),
57 )
58 )
59 # Get the autoincrement ID back
60 conn = op.get_bind()
61 result = conn.execute('SELECT id FROM journalists WHERE username="deleted";').fetchall()
62 return result[0][0]
63
64
65 def migrate_nulls() -> None:
66 """migrate existing journalist_id=NULL over to deleted or delete them"""
67 op.execute("DELETE FROM journalist_login_attempt WHERE journalist_id IS NULL;")
68 op.execute("DELETE FROM revoked_tokens WHERE journalist_id IS NULL;")
69 # Look to see if we have data to migrate
70 tables = ("replies", "seen_files", "seen_messages", "seen_replies")
71 needs_migration = []
72 conn = op.get_bind()
73 for table in tables:
74 result = conn.execute( # nosec
75 f"SELECT 1 FROM {table} WHERE journalist_id IS NULL;"
76 ).first()
77 if result is not None:
78 needs_migration.append(table)
79
80 if not needs_migration:
81 return
82
83 deleted_id = create_deleted()
84 for table in needs_migration:
85 # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only
86 # have seen each item once. It is possible multiple NULL journalist have seen the same thing
87 # so we do this update in two passes.
88 # First we update as many rows to point to the deleted journalist as possible, ignoring any
89 # unique key violations.
90 op.execute(
91 sa.text(
92 f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id "
93 "WHERE journalist_id IS NULL;"
94 ).bindparams(journalist_id=deleted_id)
95 )
96 # Then we delete any leftovers which had been ignored earlier.
97 op.execute(f"DELETE FROM {table} WHERE journalist_id IS NULL") # nosec
98
99
100 def upgrade() -> None:
101 migrate_nulls()
102
103 with op.batch_alter_table("journalist_login_attempt", schema=None) as batch_op:
104 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
105
106 with op.batch_alter_table("replies", schema=None) as batch_op:
107 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
108
109 with op.batch_alter_table("revoked_tokens", schema=None) as batch_op:
110 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
111
112 with op.batch_alter_table("seen_files", schema=None) as batch_op:
113 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
114
115 with op.batch_alter_table("seen_messages", schema=None) as batch_op:
116 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
117
118 with op.batch_alter_table("seen_replies", schema=None) as batch_op:
119 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
120
121
122 def downgrade() -> None:
123 # We do not un-migrate the data back to journalist_id=NULL
124
125 with op.batch_alter_table("seen_replies", schema=None) as batch_op:
126 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
127
128 with op.batch_alter_table("seen_messages", schema=None) as batch_op:
129 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
130
131 with op.batch_alter_table("seen_files", schema=None) as batch_op:
132 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
133
134 with op.batch_alter_table("revoked_tokens", schema=None) as batch_op:
135 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
136
137 with op.batch_alter_table("replies", schema=None) as batch_op:
138 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
139
140 with op.batch_alter_table("journalist_login_attempt", schema=None) as batch_op:
141 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py
--- a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py
+++ b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py
@@ -71,8 +71,8 @@
needs_migration = []
conn = op.get_bind()
for table in tables:
- result = conn.execute( # nosec
- f"SELECT 1 FROM {table} WHERE journalist_id IS NULL;"
+ result = conn.execute(
+ f"SELECT 1 FROM {table} WHERE journalist_id IS NULL;" # nosec
).first()
if result is not None:
needs_migration.append(table)
@@ -89,7 +89,7 @@
# unique key violations.
op.execute(
sa.text(
- f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id "
+ f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id " # nosec
"WHERE journalist_id IS NULL;"
).bindparams(journalist_id=deleted_id)
)
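A note on what this patch changes: bandit ties a finding, and therefore its `# nosec` suppression, to the specific line it reports. B608 is reported on the line that builds the SQL string, so the marker has to move from the `conn.execute(` line onto the f-string line itself. A minimal sketch of the resulting pattern (identifiers such as `conn` and `table` come from the migration file above):

```python
# Sketch of the suppression placement the diff settles on; nothing else changes.
result = conn.execute(
    f"SELECT 1 FROM {table} WHERE journalist_id IS NULL;"  # nosec
).first()
```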
|
{"golden_diff": "diff --git a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py\n--- a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py\n+++ b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py\n@@ -71,8 +71,8 @@\n needs_migration = []\n conn = op.get_bind()\n for table in tables:\n- result = conn.execute( # nosec\n- f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\"\n+ result = conn.execute(\n+ f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\" # nosec\n ).first()\n if result is not None:\n needs_migration.append(table)\n@@ -89,7 +89,7 @@\n # unique key violations.\n op.execute(\n sa.text(\n- f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \"\n+ f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \" # nosec\n \"WHERE journalist_id IS NULL;\"\n ).bindparams(journalist_id=deleted_id)\n )\n", "issue": "New bandit failures\n```\r\nTest results:\r\n>> Issue: [B113:request_without_timeout] Requests call without timeout\r\n Severity: Medium Confidence: Low\r\n CWE: CWE-400 (https://cwe.mitre.org/data/definitions/400.html)\r\n More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b113_request_without_timeout.html\r\n Location: ./admin/tests/test_integration.py:591:21\r\n590\t )\r\n591\t latest_release = requests.get(github_url).json()\r\n592\t latest_tag = str(latest_release[\"tag_name\"])\r\n\r\n--------------------------------------------------\r\n>> Issue: [B113:request_without_timeout] Requests call without timeout\r\n Severity: Medium Confidence: Low\r\n CWE: CWE-400 (https://cwe.mitre.org/data/definitions/400.html)\r\n More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b113_request_without_timeout.html\r\n Location: ./admin/tests/test_integration.py:664:21\r\n663\t )\r\n664\t latest_release = requests.get(github_url).json()\r\n665\t latest_tag = str(latest_release[\"tag_name\"])\r\n\r\n--------------------------------------------------\r\n>> Issue: [B608:hardcoded_sql_expressions] Possible SQL injection vector through string-based query construction.\r\n Severity: Medium Confidence: Medium\r\n CWE: CWE-89 (https://cwe.mitre.org/data/definitions/89.html)\r\n More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b608_hardcoded_sql_expressions.html\r\n Location: ./securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py:75:12\r\n74\t result = conn.execute( # nosec\r\n75\t f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\"\r\n76\t ).first()\r\n\r\n--------------------------------------------------\r\n>> Issue: [B608:hardcoded_sql_expressions] Possible SQL injection vector through string-based query construction.\r\n Severity: Medium Confidence: Low\r\n CWE: CWE-89 (https://cwe.mitre.org/data/definitions/89.html)\r\n More Info: https://bandit.readthedocs.io/en/1.7.5/plugins/b608_hardcoded_sql_expressions.html\r\n Location: ./securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py:92:16\r\n91\t sa.text(\r\n92\t f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \"\r\n93\t \"WHERE journalist_id IS NULL;\"\r\n94\t ).bindparams(journalist_id=deleted_id)\r\n\r\n--------------------------------------------------\r\n```\n", "before_files": [{"content": "\"\"\"make journalist_id non-nullable\n\nRevision ID: 2e24fc7536e8\nRevises: de00920916bf\nCreate Date: 2022-01-12 19:31:06.186285\n\n\"\"\"\nimport os\nimport uuid\n\nimport argon2\nimport sqlalchemy 
as sa\nimport two_factor\nfrom alembic import op\n\n# raise the errors if we're not in production\nraise_errors = os.environ.get(\"SECUREDROP_ENV\", \"prod\") != \"prod\"\n\ntry:\n from models import ARGON2_PARAMS\n from passphrases import PassphraseGenerator\nexcept: # noqa\n if raise_errors:\n raise\n\n\n# revision identifiers, used by Alembic.\nrevision = \"2e24fc7536e8\"\ndown_revision = \"de00920916bf\"\nbranch_labels = None\ndepends_on = None\n\n\ndef generate_passphrase_hash() -> str:\n passphrase = PassphraseGenerator.get_default().generate_passphrase()\n return argon2.PasswordHasher(**ARGON2_PARAMS).hash(passphrase)\n\n\ndef create_deleted() -> int:\n \"\"\"manually insert a \"deleted\" journalist user.\n\n We need to do it this way since the model will reflect the current state of\n the schema, not what it is at the current migration step\n\n It should be basically identical to what Journalist.get_deleted() does\n \"\"\"\n op.execute(\n sa.text(\n \"\"\"\\\n INSERT INTO journalists (uuid, username, session_nonce, passphrase_hash, otp_secret)\n VALUES (:uuid, \"deleted\", 0, :passphrase_hash, :otp_secret);\n \"\"\"\n ).bindparams(\n uuid=str(uuid.uuid4()),\n passphrase_hash=generate_passphrase_hash(),\n otp_secret=two_factor.random_base32(),\n )\n )\n # Get the autoincrement ID back\n conn = op.get_bind()\n result = conn.execute('SELECT id FROM journalists WHERE username=\"deleted\";').fetchall()\n return result[0][0]\n\n\ndef migrate_nulls() -> None:\n \"\"\"migrate existing journalist_id=NULL over to deleted or delete them\"\"\"\n op.execute(\"DELETE FROM journalist_login_attempt WHERE journalist_id IS NULL;\")\n op.execute(\"DELETE FROM revoked_tokens WHERE journalist_id IS NULL;\")\n # Look to see if we have data to migrate\n tables = (\"replies\", \"seen_files\", \"seen_messages\", \"seen_replies\")\n needs_migration = []\n conn = op.get_bind()\n for table in tables:\n result = conn.execute( # nosec\n f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\"\n ).first()\n if result is not None:\n needs_migration.append(table)\n\n if not needs_migration:\n return\n\n deleted_id = create_deleted()\n for table in needs_migration:\n # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only\n # have seen each item once. 
It is possible multiple NULL journalist have seen the same thing\n # so we do this update in two passes.\n # First we update as many rows to point to the deleted journalist as possible, ignoring any\n # unique key violations.\n op.execute(\n sa.text(\n f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \"\n \"WHERE journalist_id IS NULL;\"\n ).bindparams(journalist_id=deleted_id)\n )\n # Then we delete any leftovers which had been ignored earlier.\n op.execute(f\"DELETE FROM {table} WHERE journalist_id IS NULL\") # nosec\n\n\ndef upgrade() -> None:\n migrate_nulls()\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n\ndef downgrade() -> None:\n # We do not un-migrate the data back to journalist_id=NULL\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n", "path": "securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py"}], "after_files": [{"content": "\"\"\"make journalist_id non-nullable\n\nRevision ID: 2e24fc7536e8\nRevises: de00920916bf\nCreate Date: 2022-01-12 19:31:06.186285\n\n\"\"\"\nimport os\nimport uuid\n\nimport argon2\nimport sqlalchemy as sa\nimport two_factor\nfrom alembic import op\n\n# raise the errors if we're not in production\nraise_errors = os.environ.get(\"SECUREDROP_ENV\", \"prod\") != \"prod\"\n\ntry:\n from models import ARGON2_PARAMS\n from passphrases import PassphraseGenerator\nexcept: # noqa\n if raise_errors:\n raise\n\n\n# revision identifiers, used by Alembic.\nrevision = \"2e24fc7536e8\"\ndown_revision = \"de00920916bf\"\nbranch_labels = None\ndepends_on = None\n\n\ndef generate_passphrase_hash() -> str:\n passphrase = PassphraseGenerator.get_default().generate_passphrase()\n return 
argon2.PasswordHasher(**ARGON2_PARAMS).hash(passphrase)\n\n\ndef create_deleted() -> int:\n \"\"\"manually insert a \"deleted\" journalist user.\n\n We need to do it this way since the model will reflect the current state of\n the schema, not what it is at the current migration step\n\n It should be basically identical to what Journalist.get_deleted() does\n \"\"\"\n op.execute(\n sa.text(\n \"\"\"\\\n INSERT INTO journalists (uuid, username, session_nonce, passphrase_hash, otp_secret)\n VALUES (:uuid, \"deleted\", 0, :passphrase_hash, :otp_secret);\n \"\"\"\n ).bindparams(\n uuid=str(uuid.uuid4()),\n passphrase_hash=generate_passphrase_hash(),\n otp_secret=two_factor.random_base32(),\n )\n )\n # Get the autoincrement ID back\n conn = op.get_bind()\n result = conn.execute('SELECT id FROM journalists WHERE username=\"deleted\";').fetchall()\n return result[0][0]\n\n\ndef migrate_nulls() -> None:\n \"\"\"migrate existing journalist_id=NULL over to deleted or delete them\"\"\"\n op.execute(\"DELETE FROM journalist_login_attempt WHERE journalist_id IS NULL;\")\n op.execute(\"DELETE FROM revoked_tokens WHERE journalist_id IS NULL;\")\n # Look to see if we have data to migrate\n tables = (\"replies\", \"seen_files\", \"seen_messages\", \"seen_replies\")\n needs_migration = []\n conn = op.get_bind()\n for table in tables:\n result = conn.execute(\n f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\" # nosec\n ).first()\n if result is not None:\n needs_migration.append(table)\n\n if not needs_migration:\n return\n\n deleted_id = create_deleted()\n for table in needs_migration:\n # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only\n # have seen each item once. It is possible multiple NULL journalist have seen the same thing\n # so we do this update in two passes.\n # First we update as many rows to point to the deleted journalist as possible, ignoring any\n # unique key violations.\n op.execute(\n sa.text(\n f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \" # nosec\n \"WHERE journalist_id IS NULL;\"\n ).bindparams(journalist_id=deleted_id)\n )\n # Then we delete any leftovers which had been ignored earlier.\n op.execute(f\"DELETE FROM {table} WHERE journalist_id IS NULL\") # nosec\n\n\ndef upgrade() -> None:\n migrate_nulls()\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n\ndef downgrade() -> None:\n # We do not un-migrate the data back to journalist_id=NULL\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with 
op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n", "path": "securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py"}]}
| 2,535 | 318 |
gh_patches_debug_3371
|
rasdani/github-patches
|
git_diff
|
e2nIEE__pandapower-1661
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pandapower.networks: nets have wrong order of columns
Example for net = nw.case24_ieee_rts():
```python
net.bus.head()
Out[43]:
in_service max_vm_pu min_vm_pu name type vn_kv zone
0 True 1.1 0.9 a b 138.0 1.0
1 True 1.1 0.9 b b 138.0 1.0
2 True 1.1 0.9 c b 138.0 1.0
3 True 1.1 0.9 d b 138.0 1.0
4 True 1.1 0.9 e b 138.0 1.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6 from setuptools import setup, find_packages
7 import re
8
9 with open('README.rst', 'rb') as f:
10 install = f.read().decode('utf-8')
11
12 with open('CHANGELOG.rst', 'rb') as f:
13 changelog = f.read().decode('utf-8')
14
15 classifiers = [
16 'Development Status :: 5 - Production/Stable',
17 'Environment :: Console',
18 'Intended Audience :: Developers',
19 'Intended Audience :: Education',
20 'Intended Audience :: Science/Research',
21 'License :: OSI Approved :: BSD License',
22 'Natural Language :: English',
23 'Operating System :: OS Independent',
24 'Programming Language :: Python',
25 'Programming Language :: Python :: 3']
26
27 with open('.github/workflows/github_test_action.yml', 'rb') as f:
28 lines = f.read().decode('utf-8')
29 versions = set(re.findall('3.[7-9]', lines)) | set(re.findall('3.1[0-9]', lines))
30 for version in sorted(versions):
31 classifiers.append('Programming Language :: Python :: %s' % version)
32
33 long_description = '\n\n'.join((install, changelog))
34
35 setup(
36 name='pandapower',
37 version='2.10.1',
38 author='Leon Thurner, Alexander Scheidler',
39 author_email='[email protected], [email protected]',
40 description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',
41 long_description=long_description,
42 long_description_content_type='text/x-rst',
43 url='http://www.pandapower.org',
44 license='BSD',
45 install_requires=["pandas>=1.0",
46 "networkx>=2.5",
47 "scipy",
48 "numpy>=0.11",
49 "packaging",
50 "tqdm",
51 "deepdiff"],
52 extras_require={
53 "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
54 "plotting": ["plotly", "matplotlib", "python-igraph", "geopandas"],
55 # "shapely", "pyproj" are depedencies of geopandas and so already available;
56 # "base64", "hashlib", "zlib" produce installing problems, so they are not included
57 "test": ["pytest", "pytest-xdist"],
58 "performance": ["ortools"], # , "lightsim2grid"],
59 "fileio": ["xlsxwriter", "openpyxl", "cryptography", "geopandas"],
60 # "fiona" is a depedency of geopandas and so already available
61 "converter": ["matpowercaseframes"],
62 "all": ["numpydoc", "sphinx", "sphinx_rtd_theme",
63 "plotly", "matplotlib", "python-igraph", "geopandas",
64 "pytest", "pytest-xdist",
65 "ortools", # lightsim2grid,
66 "xlsxwriter", "openpyxl", "cryptography",
67 "matpowercaseframes"
68 ]}, # "shapely", "pyproj", "fiona" are depedencies of geopandas and so already available
69 # "hashlib", "zlib", "base64" produce installing problems, so it is not included
70 packages=find_packages(),
71 include_package_data=True,
72 classifiers=classifiers
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
install_requires=["pandas>=1.0",
"networkx>=2.5",
"scipy",
- "numpy>=0.11",
+ "numpy",
"packaging",
"tqdm",
"deepdiff"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n install_requires=[\"pandas>=1.0\",\n \"networkx>=2.5\",\n \"scipy\",\n- \"numpy>=0.11\",\n+ \"numpy\",\n \"packaging\",\n \"tqdm\",\n \"deepdiff\"],\n", "issue": "pandapower.networks: nets have wrong order of columns\nExample for net = nw.case24_ieee_rts():\r\n\r\n```python\r\nnet.bus.head()\r\nOut[43]: \r\n in_service max_vm_pu min_vm_pu name type vn_kv zone\r\n0 True 1.1 0.9 a b 138.0 1.0\r\n1 True 1.1 0.9 b b 138.0 1.0\r\n2 True 1.1 0.9 c b 138.0 1.0\r\n3 True 1.1 0.9 d b 138.0 1.0\r\n4 True 1.1 0.9 e b 138.0 1.0\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nfrom setuptools import setup, find_packages\nimport re\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\n\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3']\n\nwith open('.github/workflows/github_test_action.yml', 'rb') as f:\n lines = f.read().decode('utf-8')\n versions = set(re.findall('3.[7-9]', lines)) | set(re.findall('3.1[0-9]', lines))\n for version in sorted(versions):\n classifiers.append('Programming Language :: Python :: %s' % version)\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='2.10.1',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=1.0\",\n \"networkx>=2.5\",\n \"scipy\",\n \"numpy>=0.11\",\n \"packaging\",\n \"tqdm\",\n \"deepdiff\"],\n extras_require={\n \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\", \"geopandas\"],\n # \"shapely\", \"pyproj\" are depedencies of geopandas and so already available;\n # \"base64\", \"hashlib\", \"zlib\" produce installing problems, so they are not included\n \"test\": [\"pytest\", \"pytest-xdist\"],\n \"performance\": [\"ortools\"], # , \"lightsim2grid\"],\n \"fileio\": [\"xlsxwriter\", \"openpyxl\", \"cryptography\", \"geopandas\"],\n # \"fiona\" is a depedency of geopandas and so already available\n \"converter\": [\"matpowercaseframes\"],\n \"all\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\",\n \"plotly\", \"matplotlib\", \"python-igraph\", \"geopandas\",\n \"pytest\", \"pytest-xdist\",\n \"ortools\", # lightsim2grid,\n \"xlsxwriter\", \"openpyxl\", \"cryptography\",\n \"matpowercaseframes\"\n ]}, # \"shapely\", \"pyproj\", \"fiona\" are depedencies of geopandas and so already available\n # \"hashlib\", \"zlib\", \"base64\" produce installing problems, so it is not included\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n)\n", 
"path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nfrom setuptools import setup, find_packages\nimport re\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\n\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3']\n\nwith open('.github/workflows/github_test_action.yml', 'rb') as f:\n lines = f.read().decode('utf-8')\n versions = set(re.findall('3.[7-9]', lines)) | set(re.findall('3.1[0-9]', lines))\n for version in sorted(versions):\n classifiers.append('Programming Language :: Python :: %s' % version)\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='2.10.1',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=1.0\",\n \"networkx>=2.5\",\n \"scipy\",\n \"numpy\",\n \"packaging\",\n \"tqdm\",\n \"deepdiff\"],\n extras_require={\n \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\", \"geopandas\"],\n # \"shapely\", \"pyproj\" are depedencies of geopandas and so already available;\n # \"base64\", \"hashlib\", \"zlib\" produce installing problems, so they are not included\n \"test\": [\"pytest\", \"pytest-xdist\"],\n \"performance\": [\"ortools\"], # , \"lightsim2grid\"],\n \"fileio\": [\"xlsxwriter\", \"openpyxl\", \"cryptography\", \"geopandas\"],\n # \"fiona\" is a depedency of geopandas and so already available\n \"converter\": [\"matpowercaseframes\"],\n \"all\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\",\n \"plotly\", \"matplotlib\", \"python-igraph\", \"geopandas\",\n \"pytest\", \"pytest-xdist\",\n \"ortools\", # lightsim2grid,\n \"xlsxwriter\", \"openpyxl\", \"cryptography\",\n \"matpowercaseframes\"\n ]}, # \"shapely\", \"pyproj\", \"fiona\" are depedencies of geopandas and so already available\n # \"hashlib\", \"zlib\", \"base64\" produce installing problems, so it is not included\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n)\n", "path": "setup.py"}]}
| 1,423 | 88 |
gh_patches_debug_44158
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1242
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Metrics instrumentation pyramid
HTTP metrics semconv: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/http-metrics.md
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from logging import getLogger
16 from time import time_ns
17
18 from pyramid.events import BeforeTraversal
19 from pyramid.httpexceptions import HTTPException, HTTPServerError
20 from pyramid.settings import asbool
21 from pyramid.tweens import EXCVIEW
22
23 import opentelemetry.instrumentation.wsgi as otel_wsgi
24 from opentelemetry import context, trace
25 from opentelemetry.instrumentation.propagators import (
26 get_global_response_propagator,
27 )
28 from opentelemetry.instrumentation.pyramid.version import __version__
29 from opentelemetry.instrumentation.utils import _start_internal_or_server_span
30 from opentelemetry.semconv.trace import SpanAttributes
31 from opentelemetry.util.http import get_excluded_urls
32
33 TWEEN_NAME = "opentelemetry.instrumentation.pyramid.trace_tween_factory"
34 SETTING_TRACE_ENABLED = "opentelemetry-pyramid.trace_enabled"
35
36 _ENVIRON_STARTTIME_KEY = "opentelemetry-pyramid.starttime_key"
37 _ENVIRON_SPAN_KEY = "opentelemetry-pyramid.span_key"
38 _ENVIRON_ACTIVATION_KEY = "opentelemetry-pyramid.activation_key"
39 _ENVIRON_ENABLED_KEY = "opentelemetry-pyramid.tracing_enabled_key"
40 _ENVIRON_TOKEN = "opentelemetry-pyramid.token"
41
42 _logger = getLogger(__name__)
43
44
45 _excluded_urls = get_excluded_urls("PYRAMID")
46
47
48 def includeme(config):
49 config.add_settings({SETTING_TRACE_ENABLED: True})
50
51 config.add_subscriber(_before_traversal, BeforeTraversal)
52 _insert_tween(config)
53
54
55 def _insert_tween(config):
56 settings = config.get_settings()
57 tweens = settings.get("pyramid.tweens")
58 # If the list is empty, pyramid does not consider the tweens have been
59 # set explicitly. And if our tween is already there, nothing to do
60 if not tweens or not tweens.strip():
61 # Add our tween just before the default exception handler
62 config.add_tween(TWEEN_NAME, over=EXCVIEW)
63
64
65 def _before_traversal(event):
66 request = event.request
67 request_environ = request.environ
68 span_name = otel_wsgi.get_default_span_name(request_environ)
69
70 enabled = request_environ.get(_ENVIRON_ENABLED_KEY)
71 if enabled is None:
72 _logger.warning(
73 "Opentelemetry pyramid tween 'opentelemetry.instrumentation.pyramid.trace_tween_factory'"
74 "was not called. Make sure that the tween is included in 'pyramid.tweens' if"
75 "the tween list was created manually"
76 )
77 return
78
79 if not enabled:
80 # Tracing not enabled, return
81 return
82
83 start_time = request_environ.get(_ENVIRON_STARTTIME_KEY)
84 tracer = trace.get_tracer(__name__, __version__)
85
86 if request.matched_route:
87 span_name = request.matched_route.pattern
88 else:
89 span_name = otel_wsgi.get_default_span_name(request_environ)
90
91 span, token = _start_internal_or_server_span(
92 tracer=tracer,
93 span_name=span_name,
94 start_time=start_time,
95 context_carrier=request_environ,
96 context_getter=otel_wsgi.wsgi_getter,
97 )
98
99 if span.is_recording():
100 attributes = otel_wsgi.collect_request_attributes(request_environ)
101 if request.matched_route:
102 attributes[
103 SpanAttributes.HTTP_ROUTE
104 ] = request.matched_route.pattern
105 for key, value in attributes.items():
106 span.set_attribute(key, value)
107 if span.kind == trace.SpanKind.SERVER:
108 custom_attributes = (
109 otel_wsgi.collect_custom_request_headers_attributes(
110 request_environ
111 )
112 )
113 if len(custom_attributes) > 0:
114 span.set_attributes(custom_attributes)
115
116 activation = trace.use_span(span, end_on_exit=True)
117 activation.__enter__() # pylint: disable=E1101
118 request_environ[_ENVIRON_ACTIVATION_KEY] = activation
119 request_environ[_ENVIRON_SPAN_KEY] = span
120 if token:
121 request_environ[_ENVIRON_TOKEN] = token
122
123
124 def trace_tween_factory(handler, registry):
125 settings = registry.settings
126 enabled = asbool(settings.get(SETTING_TRACE_ENABLED, True))
127
128 if not enabled:
129 # If disabled, make a tween that signals to the
130 # BeforeTraversal subscriber that tracing is disabled
131 def disabled_tween(request):
132 request.environ[_ENVIRON_ENABLED_KEY] = False
133 return handler(request)
134
135 return disabled_tween
136
137 # make a request tracing function
138 # pylint: disable=too-many-branches
139 def trace_tween(request):
140 # pylint: disable=E1101
141 if _excluded_urls.url_disabled(request.url):
142 request.environ[_ENVIRON_ENABLED_KEY] = False
143 # short-circuit when we don't want to trace anything
144 return handler(request)
145
146 request.environ[_ENVIRON_ENABLED_KEY] = True
147 request.environ[_ENVIRON_STARTTIME_KEY] = time_ns()
148
149 response = None
150 status = None
151
152 try:
153 response = handler(request)
154 except HTTPException as exc:
155 # If the exception is a pyramid HTTPException,
156 # that's still valuable information that isn't necessarily
157 # a 500. For instance, HTTPFound is a 302.
158 # As described in docs, Pyramid exceptions are all valid
159 # response types
160 response = exc
161 raise
162 except BaseException:
163 # In the case that a non-HTTPException is bubbled up we
164 # should infer a internal server error and raise
165 status = "500 InternalServerError"
166 raise
167 finally:
168 span = request.environ.get(_ENVIRON_SPAN_KEY)
169 enabled = request.environ.get(_ENVIRON_ENABLED_KEY)
170 if not span and enabled:
171 _logger.warning(
172 "Pyramid environ's OpenTelemetry span missing."
173 "If the OpenTelemetry tween was added manually, make sure"
174 "PyramidInstrumentor().instrument_config(config) is called"
175 )
176 elif enabled:
177 status = getattr(response, "status", status)
178
179 if status is not None:
180 otel_wsgi.add_response_attributes(
181 span,
182 status,
183 getattr(response, "headerlist", None),
184 )
185
186 if span.is_recording() and span.kind == trace.SpanKind.SERVER:
187 custom_attributes = (
188 otel_wsgi.collect_custom_response_headers_attributes(
189 getattr(response, "headerlist", None)
190 )
191 )
192 if len(custom_attributes) > 0:
193 span.set_attributes(custom_attributes)
194
195 propagator = get_global_response_propagator()
196 if propagator and hasattr(response, "headers"):
197 propagator.inject(response.headers)
198
199 activation = request.environ.get(_ENVIRON_ACTIVATION_KEY)
200
201 # Only considering HTTPServerError
202 # to make sure 200, 300 and 400 exceptions are not reported as error
203 if isinstance(response, HTTPServerError):
204 activation.__exit__(
205 type(response),
206 response,
207 getattr(response, "__traceback__", None),
208 )
209 else:
210 activation.__exit__(None, None, None)
211
212 env_token = request.environ.get(_ENVIRON_TOKEN, None)
213 if env_token is not None:
214 context.detach(env_token)
215
216 return response
217
218 return trace_tween
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py b/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py
--- a/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py
+++ b/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py
@@ -14,6 +14,7 @@
from logging import getLogger
from time import time_ns
+from timeit import default_timer
from pyramid.events import BeforeTraversal
from pyramid.httpexceptions import HTTPException, HTTPServerError
@@ -27,6 +28,7 @@
)
from opentelemetry.instrumentation.pyramid.version import __version__
from opentelemetry.instrumentation.utils import _start_internal_or_server_span
+from opentelemetry.metrics import get_meter
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.util.http import get_excluded_urls
@@ -122,8 +124,20 @@
def trace_tween_factory(handler, registry):
+ # pylint: disable=too-many-statements
settings = registry.settings
enabled = asbool(settings.get(SETTING_TRACE_ENABLED, True))
+ meter = get_meter(__name__, __version__)
+ duration_histogram = meter.create_histogram(
+ name="http.server.duration",
+ unit="ms",
+ description="measures the duration of the inbound HTTP request",
+ )
+ active_requests_counter = meter.create_up_down_counter(
+ name="http.server.active_requests",
+ unit="requests",
+ description="measures the number of concurrent HTTP requests that are currently in-flight",
+ )
if not enabled:
# If disabled, make a tween that signals to the
@@ -137,14 +151,23 @@
# make a request tracing function
# pylint: disable=too-many-branches
def trace_tween(request):
- # pylint: disable=E1101
+ # pylint: disable=E1101, too-many-locals
if _excluded_urls.url_disabled(request.url):
request.environ[_ENVIRON_ENABLED_KEY] = False
# short-circuit when we don't want to trace anything
return handler(request)
+ attributes = otel_wsgi.collect_request_attributes(request.environ)
+
request.environ[_ENVIRON_ENABLED_KEY] = True
request.environ[_ENVIRON_STARTTIME_KEY] = time_ns()
+ active_requests_count_attrs = (
+ otel_wsgi._parse_active_request_count_attrs(attributes)
+ )
+ duration_attrs = otel_wsgi._parse_duration_attrs(attributes)
+
+ start = default_timer()
+ active_requests_counter.add(1, active_requests_count_attrs)
response = None
status = None
@@ -165,6 +188,15 @@
status = "500 InternalServerError"
raise
finally:
+ duration = max(round((default_timer() - start) * 1000), 0)
+ status = getattr(response, "status", status)
+ status_code = otel_wsgi._parse_status_code(status)
+ if status_code is not None:
+ duration_attrs[
+ SpanAttributes.HTTP_STATUS_CODE
+ ] = otel_wsgi._parse_status_code(status)
+ duration_histogram.record(duration, duration_attrs)
+ active_requests_counter.add(-1, active_requests_count_attrs)
span = request.environ.get(_ENVIRON_SPAN_KEY)
enabled = request.environ.get(_ENVIRON_ENABLED_KEY)
if not span and enabled:
@@ -174,7 +206,6 @@
"PyramidInstrumentor().instrument_config(config) is called"
)
elif enabled:
- status = getattr(response, "status", status)
if status is not None:
otel_wsgi.add_response_attributes(
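For context, the instruments this patch introduces are the two standard HTTP server metrics from the semantic conventions linked in the issue. A condensed, self-contained sketch of the same measurement flow, using only the public `opentelemetry.metrics` API that already appears in the diff (the WSGI attribute-parsing helpers are replaced here by a caller-supplied dict):

```python
# Minimal sketch, not the instrumentation module itself. `attributes` stands in
# for the parsed request attributes; everything else mirrors the patch above.
from timeit import default_timer

from opentelemetry.metrics import get_meter

meter = get_meter("http-metrics-sketch")
duration_histogram = meter.create_histogram(
    name="http.server.duration",
    unit="ms",
    description="measures the duration of the inbound HTTP request",
)
active_requests_counter = meter.create_up_down_counter(
    name="http.server.active_requests",
    unit="requests",
    description="measures the number of concurrent HTTP requests that are currently in-flight",
)


def handle_with_metrics(handler, request, attributes):
    active_requests_counter.add(1, attributes)
    start = default_timer()
    try:
        return handler(request)
    finally:
        elapsed_ms = max(round((default_timer() - start) * 1000), 0)
        duration_histogram.record(elapsed_ms, attributes)
        active_requests_counter.add(-1, attributes)
```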
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py b/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py\n--- a/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py\n+++ b/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py\n@@ -14,6 +14,7 @@\n \n from logging import getLogger\n from time import time_ns\n+from timeit import default_timer\n \n from pyramid.events import BeforeTraversal\n from pyramid.httpexceptions import HTTPException, HTTPServerError\n@@ -27,6 +28,7 @@\n )\n from opentelemetry.instrumentation.pyramid.version import __version__\n from opentelemetry.instrumentation.utils import _start_internal_or_server_span\n+from opentelemetry.metrics import get_meter\n from opentelemetry.semconv.trace import SpanAttributes\n from opentelemetry.util.http import get_excluded_urls\n \n@@ -122,8 +124,20 @@\n \n \n def trace_tween_factory(handler, registry):\n+ # pylint: disable=too-many-statements\n settings = registry.settings\n enabled = asbool(settings.get(SETTING_TRACE_ENABLED, True))\n+ meter = get_meter(__name__, __version__)\n+ duration_histogram = meter.create_histogram(\n+ name=\"http.server.duration\",\n+ unit=\"ms\",\n+ description=\"measures the duration of the inbound HTTP request\",\n+ )\n+ active_requests_counter = meter.create_up_down_counter(\n+ name=\"http.server.active_requests\",\n+ unit=\"requests\",\n+ description=\"measures the number of concurrent HTTP requests that are currently in-flight\",\n+ )\n \n if not enabled:\n # If disabled, make a tween that signals to the\n@@ -137,14 +151,23 @@\n # make a request tracing function\n # pylint: disable=too-many-branches\n def trace_tween(request):\n- # pylint: disable=E1101\n+ # pylint: disable=E1101, too-many-locals\n if _excluded_urls.url_disabled(request.url):\n request.environ[_ENVIRON_ENABLED_KEY] = False\n # short-circuit when we don't want to trace anything\n return handler(request)\n \n+ attributes = otel_wsgi.collect_request_attributes(request.environ)\n+\n request.environ[_ENVIRON_ENABLED_KEY] = True\n request.environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n+ active_requests_count_attrs = (\n+ otel_wsgi._parse_active_request_count_attrs(attributes)\n+ )\n+ duration_attrs = otel_wsgi._parse_duration_attrs(attributes)\n+\n+ start = default_timer()\n+ active_requests_counter.add(1, active_requests_count_attrs)\n \n response = None\n status = None\n@@ -165,6 +188,15 @@\n status = \"500 InternalServerError\"\n raise\n finally:\n+ duration = max(round((default_timer() - start) * 1000), 0)\n+ status = getattr(response, \"status\", status)\n+ status_code = otel_wsgi._parse_status_code(status)\n+ if status_code is not None:\n+ duration_attrs[\n+ SpanAttributes.HTTP_STATUS_CODE\n+ ] = otel_wsgi._parse_status_code(status)\n+ duration_histogram.record(duration, duration_attrs)\n+ active_requests_counter.add(-1, active_requests_count_attrs)\n span = request.environ.get(_ENVIRON_SPAN_KEY)\n enabled = request.environ.get(_ENVIRON_ENABLED_KEY)\n if not span and enabled:\n@@ -174,7 +206,6 @@\n \"PyramidInstrumentor().instrument_config(config) is called\"\n )\n elif enabled:\n- status = getattr(response, \"status\", status)\n \n if status is not None:\n otel_wsgi.add_response_attributes(\n", "issue": "Metrics instrumentation pyramid\nHTTP metrics semconv: 
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/http-metrics.md\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom time import time_ns\n\nfrom pyramid.events import BeforeTraversal\nfrom pyramid.httpexceptions import HTTPException, HTTPServerError\nfrom pyramid.settings import asbool\nfrom pyramid.tweens import EXCVIEW\n\nimport opentelemetry.instrumentation.wsgi as otel_wsgi\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.propagators import (\n get_global_response_propagator,\n)\nfrom opentelemetry.instrumentation.pyramid.version import __version__\nfrom opentelemetry.instrumentation.utils import _start_internal_or_server_span\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.util.http import get_excluded_urls\n\nTWEEN_NAME = \"opentelemetry.instrumentation.pyramid.trace_tween_factory\"\nSETTING_TRACE_ENABLED = \"opentelemetry-pyramid.trace_enabled\"\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-pyramid.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-pyramid.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-pyramid.activation_key\"\n_ENVIRON_ENABLED_KEY = \"opentelemetry-pyramid.tracing_enabled_key\"\n_ENVIRON_TOKEN = \"opentelemetry-pyramid.token\"\n\n_logger = getLogger(__name__)\n\n\n_excluded_urls = get_excluded_urls(\"PYRAMID\")\n\n\ndef includeme(config):\n config.add_settings({SETTING_TRACE_ENABLED: True})\n\n config.add_subscriber(_before_traversal, BeforeTraversal)\n _insert_tween(config)\n\n\ndef _insert_tween(config):\n settings = config.get_settings()\n tweens = settings.get(\"pyramid.tweens\")\n # If the list is empty, pyramid does not consider the tweens have been\n # set explicitly. And if our tween is already there, nothing to do\n if not tweens or not tweens.strip():\n # Add our tween just before the default exception handler\n config.add_tween(TWEEN_NAME, over=EXCVIEW)\n\n\ndef _before_traversal(event):\n request = event.request\n request_environ = request.environ\n span_name = otel_wsgi.get_default_span_name(request_environ)\n\n enabled = request_environ.get(_ENVIRON_ENABLED_KEY)\n if enabled is None:\n _logger.warning(\n \"Opentelemetry pyramid tween 'opentelemetry.instrumentation.pyramid.trace_tween_factory'\"\n \"was not called. 
Make sure that the tween is included in 'pyramid.tweens' if\"\n \"the tween list was created manually\"\n )\n return\n\n if not enabled:\n # Tracing not enabled, return\n return\n\n start_time = request_environ.get(_ENVIRON_STARTTIME_KEY)\n tracer = trace.get_tracer(__name__, __version__)\n\n if request.matched_route:\n span_name = request.matched_route.pattern\n else:\n span_name = otel_wsgi.get_default_span_name(request_environ)\n\n span, token = _start_internal_or_server_span(\n tracer=tracer,\n span_name=span_name,\n start_time=start_time,\n context_carrier=request_environ,\n context_getter=otel_wsgi.wsgi_getter,\n )\n\n if span.is_recording():\n attributes = otel_wsgi.collect_request_attributes(request_environ)\n if request.matched_route:\n attributes[\n SpanAttributes.HTTP_ROUTE\n ] = request.matched_route.pattern\n for key, value in attributes.items():\n span.set_attribute(key, value)\n if span.kind == trace.SpanKind.SERVER:\n custom_attributes = (\n otel_wsgi.collect_custom_request_headers_attributes(\n request_environ\n )\n )\n if len(custom_attributes) > 0:\n span.set_attributes(custom_attributes)\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n request_environ[_ENVIRON_ACTIVATION_KEY] = activation\n request_environ[_ENVIRON_SPAN_KEY] = span\n if token:\n request_environ[_ENVIRON_TOKEN] = token\n\n\ndef trace_tween_factory(handler, registry):\n settings = registry.settings\n enabled = asbool(settings.get(SETTING_TRACE_ENABLED, True))\n\n if not enabled:\n # If disabled, make a tween that signals to the\n # BeforeTraversal subscriber that tracing is disabled\n def disabled_tween(request):\n request.environ[_ENVIRON_ENABLED_KEY] = False\n return handler(request)\n\n return disabled_tween\n\n # make a request tracing function\n # pylint: disable=too-many-branches\n def trace_tween(request):\n # pylint: disable=E1101\n if _excluded_urls.url_disabled(request.url):\n request.environ[_ENVIRON_ENABLED_KEY] = False\n # short-circuit when we don't want to trace anything\n return handler(request)\n\n request.environ[_ENVIRON_ENABLED_KEY] = True\n request.environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n response = None\n status = None\n\n try:\n response = handler(request)\n except HTTPException as exc:\n # If the exception is a pyramid HTTPException,\n # that's still valuable information that isn't necessarily\n # a 500. 
For instance, HTTPFound is a 302.\n # As described in docs, Pyramid exceptions are all valid\n # response types\n response = exc\n raise\n except BaseException:\n # In the case that a non-HTTPException is bubbled up we\n # should infer a internal server error and raise\n status = \"500 InternalServerError\"\n raise\n finally:\n span = request.environ.get(_ENVIRON_SPAN_KEY)\n enabled = request.environ.get(_ENVIRON_ENABLED_KEY)\n if not span and enabled:\n _logger.warning(\n \"Pyramid environ's OpenTelemetry span missing.\"\n \"If the OpenTelemetry tween was added manually, make sure\"\n \"PyramidInstrumentor().instrument_config(config) is called\"\n )\n elif enabled:\n status = getattr(response, \"status\", status)\n\n if status is not None:\n otel_wsgi.add_response_attributes(\n span,\n status,\n getattr(response, \"headerlist\", None),\n )\n\n if span.is_recording() and span.kind == trace.SpanKind.SERVER:\n custom_attributes = (\n otel_wsgi.collect_custom_response_headers_attributes(\n getattr(response, \"headerlist\", None)\n )\n )\n if len(custom_attributes) > 0:\n span.set_attributes(custom_attributes)\n\n propagator = get_global_response_propagator()\n if propagator and hasattr(response, \"headers\"):\n propagator.inject(response.headers)\n\n activation = request.environ.get(_ENVIRON_ACTIVATION_KEY)\n\n # Only considering HTTPServerError\n # to make sure 200, 300 and 400 exceptions are not reported as error\n if isinstance(response, HTTPServerError):\n activation.__exit__(\n type(response),\n response,\n getattr(response, \"__traceback__\", None),\n )\n else:\n activation.__exit__(None, None, None)\n\n env_token = request.environ.get(_ENVIRON_TOKEN, None)\n if env_token is not None:\n context.detach(env_token)\n\n return response\n\n return trace_tween\n", "path": "instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom time import time_ns\nfrom timeit import default_timer\n\nfrom pyramid.events import BeforeTraversal\nfrom pyramid.httpexceptions import HTTPException, HTTPServerError\nfrom pyramid.settings import asbool\nfrom pyramid.tweens import EXCVIEW\n\nimport opentelemetry.instrumentation.wsgi as otel_wsgi\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.propagators import (\n get_global_response_propagator,\n)\nfrom opentelemetry.instrumentation.pyramid.version import __version__\nfrom opentelemetry.instrumentation.utils import _start_internal_or_server_span\nfrom opentelemetry.metrics import get_meter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.util.http import get_excluded_urls\n\nTWEEN_NAME = \"opentelemetry.instrumentation.pyramid.trace_tween_factory\"\nSETTING_TRACE_ENABLED = \"opentelemetry-pyramid.trace_enabled\"\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-pyramid.starttime_key\"\n_ENVIRON_SPAN_KEY = 
\"opentelemetry-pyramid.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-pyramid.activation_key\"\n_ENVIRON_ENABLED_KEY = \"opentelemetry-pyramid.tracing_enabled_key\"\n_ENVIRON_TOKEN = \"opentelemetry-pyramid.token\"\n\n_logger = getLogger(__name__)\n\n\n_excluded_urls = get_excluded_urls(\"PYRAMID\")\n\n\ndef includeme(config):\n config.add_settings({SETTING_TRACE_ENABLED: True})\n\n config.add_subscriber(_before_traversal, BeforeTraversal)\n _insert_tween(config)\n\n\ndef _insert_tween(config):\n settings = config.get_settings()\n tweens = settings.get(\"pyramid.tweens\")\n # If the list is empty, pyramid does not consider the tweens have been\n # set explicitly. And if our tween is already there, nothing to do\n if not tweens or not tweens.strip():\n # Add our tween just before the default exception handler\n config.add_tween(TWEEN_NAME, over=EXCVIEW)\n\n\ndef _before_traversal(event):\n request = event.request\n request_environ = request.environ\n span_name = otel_wsgi.get_default_span_name(request_environ)\n\n enabled = request_environ.get(_ENVIRON_ENABLED_KEY)\n if enabled is None:\n _logger.warning(\n \"Opentelemetry pyramid tween 'opentelemetry.instrumentation.pyramid.trace_tween_factory'\"\n \"was not called. Make sure that the tween is included in 'pyramid.tweens' if\"\n \"the tween list was created manually\"\n )\n return\n\n if not enabled:\n # Tracing not enabled, return\n return\n\n start_time = request_environ.get(_ENVIRON_STARTTIME_KEY)\n tracer = trace.get_tracer(__name__, __version__)\n\n if request.matched_route:\n span_name = request.matched_route.pattern\n else:\n span_name = otel_wsgi.get_default_span_name(request_environ)\n\n span, token = _start_internal_or_server_span(\n tracer=tracer,\n span_name=span_name,\n start_time=start_time,\n context_carrier=request_environ,\n context_getter=otel_wsgi.wsgi_getter,\n )\n\n if span.is_recording():\n attributes = otel_wsgi.collect_request_attributes(request_environ)\n if request.matched_route:\n attributes[\n SpanAttributes.HTTP_ROUTE\n ] = request.matched_route.pattern\n for key, value in attributes.items():\n span.set_attribute(key, value)\n if span.kind == trace.SpanKind.SERVER:\n custom_attributes = (\n otel_wsgi.collect_custom_request_headers_attributes(\n request_environ\n )\n )\n if len(custom_attributes) > 0:\n span.set_attributes(custom_attributes)\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n request_environ[_ENVIRON_ACTIVATION_KEY] = activation\n request_environ[_ENVIRON_SPAN_KEY] = span\n if token:\n request_environ[_ENVIRON_TOKEN] = token\n\n\ndef trace_tween_factory(handler, registry):\n # pylint: disable=too-many-statements\n settings = registry.settings\n enabled = asbool(settings.get(SETTING_TRACE_ENABLED, True))\n meter = get_meter(__name__, __version__)\n duration_histogram = meter.create_histogram(\n name=\"http.server.duration\",\n unit=\"ms\",\n description=\"measures the duration of the inbound HTTP request\",\n )\n active_requests_counter = meter.create_up_down_counter(\n name=\"http.server.active_requests\",\n unit=\"requests\",\n description=\"measures the number of concurrent HTTP requests that are currently in-flight\",\n )\n\n if not enabled:\n # If disabled, make a tween that signals to the\n # BeforeTraversal subscriber that tracing is disabled\n def disabled_tween(request):\n request.environ[_ENVIRON_ENABLED_KEY] = False\n return handler(request)\n\n return disabled_tween\n\n # make a request tracing function\n # pylint: 
disable=too-many-branches\n def trace_tween(request):\n # pylint: disable=E1101, too-many-locals\n if _excluded_urls.url_disabled(request.url):\n request.environ[_ENVIRON_ENABLED_KEY] = False\n # short-circuit when we don't want to trace anything\n return handler(request)\n\n attributes = otel_wsgi.collect_request_attributes(request.environ)\n\n request.environ[_ENVIRON_ENABLED_KEY] = True\n request.environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n active_requests_count_attrs = (\n otel_wsgi._parse_active_request_count_attrs(attributes)\n )\n duration_attrs = otel_wsgi._parse_duration_attrs(attributes)\n\n start = default_timer()\n active_requests_counter.add(1, active_requests_count_attrs)\n\n response = None\n status = None\n\n try:\n response = handler(request)\n except HTTPException as exc:\n # If the exception is a pyramid HTTPException,\n # that's still valuable information that isn't necessarily\n # a 500. For instance, HTTPFound is a 302.\n # As described in docs, Pyramid exceptions are all valid\n # response types\n response = exc\n raise\n except BaseException:\n # In the case that a non-HTTPException is bubbled up we\n # should infer a internal server error and raise\n status = \"500 InternalServerError\"\n raise\n finally:\n duration = max(round((default_timer() - start) * 1000), 0)\n status = getattr(response, \"status\", status)\n status_code = otel_wsgi._parse_status_code(status)\n if status_code is not None:\n duration_attrs[\n SpanAttributes.HTTP_STATUS_CODE\n ] = otel_wsgi._parse_status_code(status)\n duration_histogram.record(duration, duration_attrs)\n active_requests_counter.add(-1, active_requests_count_attrs)\n span = request.environ.get(_ENVIRON_SPAN_KEY)\n enabled = request.environ.get(_ENVIRON_ENABLED_KEY)\n if not span and enabled:\n _logger.warning(\n \"Pyramid environ's OpenTelemetry span missing.\"\n \"If the OpenTelemetry tween was added manually, make sure\"\n \"PyramidInstrumentor().instrument_config(config) is called\"\n )\n elif enabled:\n\n if status is not None:\n otel_wsgi.add_response_attributes(\n span,\n status,\n getattr(response, \"headerlist\", None),\n )\n\n if span.is_recording() and span.kind == trace.SpanKind.SERVER:\n custom_attributes = (\n otel_wsgi.collect_custom_response_headers_attributes(\n getattr(response, \"headerlist\", None)\n )\n )\n if len(custom_attributes) > 0:\n span.set_attributes(custom_attributes)\n\n propagator = get_global_response_propagator()\n if propagator and hasattr(response, \"headers\"):\n propagator.inject(response.headers)\n\n activation = request.environ.get(_ENVIRON_ACTIVATION_KEY)\n\n # Only considering HTTPServerError\n # to make sure 200, 300 and 400 exceptions are not reported as error\n if isinstance(response, HTTPServerError):\n activation.__exit__(\n type(response),\n response,\n getattr(response, \"__traceback__\", None),\n )\n else:\n activation.__exit__(None, None, None)\n\n env_token = request.environ.get(_ENVIRON_TOKEN, None)\n if env_token is not None:\n context.detach(env_token)\n\n return response\n\n return trace_tween\n", "path": "instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/callbacks.py"}]}
| 2,569 | 872 |
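The row that ends above carries the patched OpenTelemetry Pyramid instrumentation module, whose core is a tween factory (`trace_tween_factory`) that wraps every request handler. For readers unfamiliar with Pyramid's tween chain, here is a minimal tween factory of the same shape; the module path, settings key, and timing logic are illustrative assumptions, not part of the dataset row.

```python
# Minimal sketch of a Pyramid tween factory (hypothetical example).
# Like trace_tween_factory above, it receives the downstream handler and
# the registry, and returns a callable that wraps each request.
import time


def timing_tween_factory(handler, registry):
    def timing_tween(request):
        start = time.time()
        try:
            return handler(request)  # call the rest of the tween chain
        finally:
            elapsed = time.time() - start
            registry.settings.setdefault("request_times", []).append(elapsed)
    return timing_tween


# Registered in Pyramid configuration, e.g.:
# config.add_tween("myapp.tweens.timing_tween_factory")
```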
gh_patches_debug_2737
|
rasdani/github-patches
|
git_diff
|
holoviz__panel-2616
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--autoreload raises AttributeError: 'NoneType' object has no attribute 'stop'
I'm on the current Panel master. When I `panel serve 'script.py' --autoreload` this code
```python
import panel as pn
pn.extension()
import numpy as np
import holoviews as hv
from holoviews import opts, streams
from holoviews.plotting.links import DataLink
hv.extension('bokeh')
curve = hv.Curve(np.random.randn(10).cumsum()).opts(responsive=True, line_width=6)
table = hv.Table(curve).opts(editable=True)
component=pn.pane.HoloViews(table, height=500, sizing_mode="stretch_both")
pn.template.FastListTemplate(title="Table", main=[component]).servable()
```
and change the code I get the error
```bash
2021-08-04 06:40:44,760 Error thrown from periodic callback:
2021-08-04 06:40:44,763 Traceback (most recent call last):
File "c:\repos\private\panel_docker\panel\.venv\lib\site-packages\tornado\gen.py", line 526, in callback
result_list.append(f.result())
File "c:\repos\private\panel_docker\panel\.venv\lib\site-packages\bokeh\server\session.py", line 67, in _needs_document_lock_wrapper
result = func(self, *args, **kwargs)
File "c:\repos\private\panel_docker\panel\.venv\lib\site-packages\bokeh\server\session.py", line 195, in with_document_locked
return func(*args, **kwargs)
File "c:\repos\private\panel_docker\panel\.venv\lib\site-packages\bokeh\document\document.py", line 1212, in wrapper
return doc._with_self_as_curdoc(invoke)
File "c:\repos\private\panel_docker\panel\.venv\lib\site-packages\bokeh\document\document.py", line 1198, in _with_self_as_curdoc
return f()
File "c:\repos\private\panel_docker\panel\.venv\lib\site-packages\bokeh\document\document.py", line 1211, in invoke
return f(*args, **kwargs)
File "c:\repos\private\panel_docker\panel\panel\io\callbacks.py", line 72, in _periodic_callback
self.callback()
File "c:\repos\private\panel_docker\panel\panel\io\reload.py", line 155, in _reload_on_update
_check_file(modify_times, path)
File "c:\repos\private\panel_docker\panel\panel\io\reload.py", line 134, in _check_file
_reload(module)
File "c:\repos\private\panel_docker\panel\panel\io\reload.py", line 117, in _reload
cb.stop()
File "c:\repos\private\panel_docker\panel\panel\io\callbacks.py", line 134, in stop
self._cb.stop()
AttributeError: 'NoneType' object has no attribute 'stop'
```
I believe this would be a major issue if 0.12.1 was released before fixing this @philippjfr 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/io/callbacks.py`
Content:
```
1 """
2 Defines callbacks to be executed on a thread or by scheduling it
3 on a running bokeh server.
4 """
5 import time
6 import param
7
8 from bokeh.io import curdoc as _curdoc
9
10 from ..util import edit_readonly
11 from .state import state
12
13
14 class PeriodicCallback(param.Parameterized):
15 """
16 Periodic encapsulates a periodic callback which will run both
17 in tornado based notebook environments and on bokeh server. By
18 default the callback will run until the stop method is called,
19 but count and timeout values can be set to limit the number of
20 executions or the maximum length of time for which the callback
21 will run. The callback may also be started and stopped by setting
22 the running parameter to True or False respectively.
23 """
24
25 callback = param.Callable(doc="""
26 The callback to execute periodically.""")
27
28 count = param.Integer(default=None, doc="""
29 Number of times the callback will be executed, by default
30 this is unlimited.""")
31
32 period = param.Integer(default=500, doc="""
33 Period in milliseconds at which the callback is executed.""")
34
35 timeout = param.Integer(default=None, doc="""
36 Timeout in milliseconds from the start time at which the callback
37 expires.""")
38
39 running = param.Boolean(default=False, doc="""
40 Toggles whether the periodic callback is currently running.""")
41
42 def __init__(self, **params):
43 super().__init__(**params)
44 self._counter = 0
45 self._start_time = None
46 self._cb = None
47 self._updating = False
48 self._doc = None
49
50 @param.depends('running', watch=True)
51 def _start(self):
52 if not self.running or self._updating:
53 return
54 self.start()
55
56 @param.depends('running', watch=True)
57 def _stop(self):
58 if self.running or self._updating:
59 return
60 self.stop()
61
62 @param.depends('period', watch=True)
63 def _update_period(self):
64 if self._cb:
65 self.stop()
66 self.start()
67
68 def _periodic_callback(self):
69 with edit_readonly(state):
70 state.busy = True
71 try:
72 self.callback()
73 finally:
74 with edit_readonly(state):
75 state.busy = False
76 self._counter += 1
77 if self.timeout is not None:
78 dt = (time.time() - self._start_time) * 1000
79 if dt > self.timeout:
80 self.stop()
81 if self._counter == self.count:
82 self.stop()
83
84 @property
85 def counter(self):
86 """
87 Returns the execution count of the periodic callback.
88 """
89 return self._counter
90
91 def _cleanup(self, session_context):
92 self.stop()
93
94 def start(self):
95 """
96 Starts running the periodic callback.
97 """
98 if self._cb is not None:
99 raise RuntimeError('Periodic callback has already started.')
100 if not self.running:
101 try:
102 self._updating = True
103 self.running = True
104 finally:
105 self._updating = False
106 self._start_time = time.time()
107 if state.curdoc:
108 self._doc = state.curdoc
109 self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)
110 else:
111 from tornado.ioloop import PeriodicCallback
112 self._cb = PeriodicCallback(self._periodic_callback, self.period)
113 self._cb.start()
114 try:
115 state.on_session_destroyed(self._cleanup)
116 except Exception:
117 pass
118
119 def stop(self):
120 """
121 Stops running the periodic callback.
122 """
123 if self.running:
124 try:
125 self._updating = True
126 self.running = False
127 finally:
128 self._updating = False
129 self._counter = 0
130 self._timeout = None
131 if self._doc:
132 self._doc.remove_periodic_callback(self._cb)
133 else:
134 self._cb.stop()
135 self._cb = None
136 doc = self._doc or _curdoc()
137 if doc:
138 doc.session_destroyed_callbacks = {
139 cb for cb in doc.session_destroyed_callbacks
140 if cb is not self._cleanup
141 }
142 self._doc = None
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/panel/io/callbacks.py b/panel/io/callbacks.py
--- a/panel/io/callbacks.py
+++ b/panel/io/callbacks.py
@@ -130,7 +130,7 @@
self._timeout = None
if self._doc:
self._doc.remove_periodic_callback(self._cb)
- else:
+ elif self._cb:
self._cb.stop()
self._cb = None
doc = self._doc or _curdoc()
|
{"golden_diff": "diff --git a/panel/io/callbacks.py b/panel/io/callbacks.py\n--- a/panel/io/callbacks.py\n+++ b/panel/io/callbacks.py\n@@ -130,7 +130,7 @@\n self._timeout = None\n if self._doc:\n self._doc.remove_periodic_callback(self._cb)\n- else:\n+ elif self._cb:\n self._cb.stop()\n self._cb = None\n doc = self._doc or _curdoc()\n", "issue": "--autoreload raises AttributeError: 'NoneType' object has no attribute 'stop'\nI'm on the current Panel master. When I `panel serve 'script.py' --autoreload` this code\r\n\r\n```python\r\nimport panel as pn\r\npn.extension()\r\n\r\nimport numpy as np\r\nimport holoviews as hv\r\n\r\nfrom holoviews import opts, streams\r\nfrom holoviews.plotting.links import DataLink\r\n\r\nhv.extension('bokeh')\r\ncurve = hv.Curve(np.random.randn(10).cumsum()).opts(responsive=True, line_width=6)\r\ntable = hv.Table(curve).opts(editable=True)\r\ncomponent=pn.pane.HoloViews(table, height=500, sizing_mode=\"stretch_both\")\r\npn.template.FastListTemplate(title=\"Table\", main=[component]).servable()\r\n```\r\n\r\nand change the code I get the error\r\n\r\n```bash\r\n2021-08-04 06:40:44,760 Error thrown from periodic callback:\r\n2021-08-04 06:40:44,763 Traceback (most recent call last):\r\n File \"c:\\repos\\private\\panel_docker\\panel\\.venv\\lib\\site-packages\\tornado\\gen.py\", line 526, in callback\r\n result_list.append(f.result())\r\n File \"c:\\repos\\private\\panel_docker\\panel\\.venv\\lib\\site-packages\\bokeh\\server\\session.py\", line 67, in _needs_document_lock_wrapper\r\n result = func(self, *args, **kwargs)\r\n File \"c:\\repos\\private\\panel_docker\\panel\\.venv\\lib\\site-packages\\bokeh\\server\\session.py\", line 195, in with_document_locked\r\n return func(*args, **kwargs)\r\n File \"c:\\repos\\private\\panel_docker\\panel\\.venv\\lib\\site-packages\\bokeh\\document\\document.py\", line 1212, in wrapper\r\n return doc._with_self_as_curdoc(invoke)\r\n File \"c:\\repos\\private\\panel_docker\\panel\\.venv\\lib\\site-packages\\bokeh\\document\\document.py\", line 1198, in _with_self_as_curdoc\r\n return f()\r\n File \"c:\\repos\\private\\panel_docker\\panel\\.venv\\lib\\site-packages\\bokeh\\document\\document.py\", line 1211, in invoke\r\n return f(*args, **kwargs)\r\n File \"c:\\repos\\private\\panel_docker\\panel\\panel\\io\\callbacks.py\", line 72, in _periodic_callback\r\n self.callback()\r\n File \"c:\\repos\\private\\panel_docker\\panel\\panel\\io\\reload.py\", line 155, in _reload_on_update\r\n _check_file(modify_times, path)\r\n File \"c:\\repos\\private\\panel_docker\\panel\\panel\\io\\reload.py\", line 134, in _check_file\r\n _reload(module)\r\n File \"c:\\repos\\private\\panel_docker\\panel\\panel\\io\\reload.py\", line 117, in _reload\r\n cb.stop()\r\n File \"c:\\repos\\private\\panel_docker\\panel\\panel\\io\\callbacks.py\", line 134, in stop\r\n self._cb.stop()\r\nAttributeError: 'NoneType' object has no attribute 'stop'\r\n```\r\n\r\nI believe this is would be a major issue if 0.12.1 was released before fixing this @philippjfr \n", "before_files": [{"content": "\"\"\"\nDefines callbacks to be executed on a thread or by scheduling it\non a running bokeh server.\n\"\"\"\nimport time\nimport param\n\nfrom bokeh.io import curdoc as _curdoc\n\nfrom ..util import edit_readonly\nfrom .state import state\n\n\nclass PeriodicCallback(param.Parameterized):\n \"\"\"\n Periodic encapsulates a periodic callback which will run both\n in tornado based notebook environments and on bokeh server. 
By\n default the callback will run until the stop method is called,\n but count and timeout values can be set to limit the number of\n executions or the maximum length of time for which the callback\n will run. The callback may also be started and stopped by setting\n the running parameter to True or False respectively.\n \"\"\"\n\n callback = param.Callable(doc=\"\"\"\n The callback to execute periodically.\"\"\")\n\n count = param.Integer(default=None, doc=\"\"\"\n Number of times the callback will be executed, by default\n this is unlimited.\"\"\")\n\n period = param.Integer(default=500, doc=\"\"\"\n Period in milliseconds at which the callback is executed.\"\"\")\n\n timeout = param.Integer(default=None, doc=\"\"\"\n Timeout in milliseconds from the start time at which the callback\n expires.\"\"\")\n\n running = param.Boolean(default=False, doc=\"\"\"\n Toggles whether the periodic callback is currently running.\"\"\")\n\n def __init__(self, **params):\n super().__init__(**params)\n self._counter = 0\n self._start_time = None\n self._cb = None\n self._updating = False\n self._doc = None\n\n @param.depends('running', watch=True)\n def _start(self):\n if not self.running or self._updating:\n return\n self.start()\n\n @param.depends('running', watch=True)\n def _stop(self):\n if self.running or self._updating:\n return\n self.stop()\n\n @param.depends('period', watch=True)\n def _update_period(self):\n if self._cb:\n self.stop()\n self.start()\n\n def _periodic_callback(self):\n with edit_readonly(state):\n state.busy = True\n try:\n self.callback()\n finally:\n with edit_readonly(state):\n state.busy = False\n self._counter += 1\n if self.timeout is not None:\n dt = (time.time() - self._start_time) * 1000\n if dt > self.timeout:\n self.stop()\n if self._counter == self.count:\n self.stop()\n\n @property\n def counter(self):\n \"\"\"\n Returns the execution count of the periodic callback.\n \"\"\"\n return self._counter\n\n def _cleanup(self, session_context):\n self.stop()\n\n def start(self):\n \"\"\"\n Starts running the periodic callback.\n \"\"\"\n if self._cb is not None:\n raise RuntimeError('Periodic callback has already started.')\n if not self.running:\n try:\n self._updating = True\n self.running = True\n finally:\n self._updating = False\n self._start_time = time.time()\n if state.curdoc:\n self._doc = state.curdoc\n self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)\n else:\n from tornado.ioloop import PeriodicCallback\n self._cb = PeriodicCallback(self._periodic_callback, self.period)\n self._cb.start()\n try:\n state.on_session_destroyed(self._cleanup)\n except Exception:\n pass\n\n def stop(self):\n \"\"\"\n Stops running the periodic callback.\n \"\"\"\n if self.running:\n try:\n self._updating = True\n self.running = False\n finally:\n self._updating = False\n self._counter = 0\n self._timeout = None\n if self._doc:\n self._doc.remove_periodic_callback(self._cb)\n else:\n self._cb.stop()\n self._cb = None\n doc = self._doc or _curdoc()\n if doc:\n doc.session_destroyed_callbacks = {\n cb for cb in doc.session_destroyed_callbacks\n if cb is not self._cleanup\n }\n self._doc = None\n", "path": "panel/io/callbacks.py"}], "after_files": [{"content": "\"\"\"\nDefines callbacks to be executed on a thread or by scheduling it\non a running bokeh server.\n\"\"\"\nimport time\nimport param\n\nfrom bokeh.io import curdoc as _curdoc\n\nfrom ..util import edit_readonly\nfrom .state import state\n\n\nclass PeriodicCallback(param.Parameterized):\n 
\"\"\"\n Periodic encapsulates a periodic callback which will run both\n in tornado based notebook environments and on bokeh server. By\n default the callback will run until the stop method is called,\n but count and timeout values can be set to limit the number of\n executions or the maximum length of time for which the callback\n will run. The callback may also be started and stopped by setting\n the running parameter to True or False respectively.\n \"\"\"\n\n callback = param.Callable(doc=\"\"\"\n The callback to execute periodically.\"\"\")\n\n count = param.Integer(default=None, doc=\"\"\"\n Number of times the callback will be executed, by default\n this is unlimited.\"\"\")\n\n period = param.Integer(default=500, doc=\"\"\"\n Period in milliseconds at which the callback is executed.\"\"\")\n\n timeout = param.Integer(default=None, doc=\"\"\"\n Timeout in milliseconds from the start time at which the callback\n expires.\"\"\")\n\n running = param.Boolean(default=False, doc=\"\"\"\n Toggles whether the periodic callback is currently running.\"\"\")\n\n def __init__(self, **params):\n super().__init__(**params)\n self._counter = 0\n self._start_time = None\n self._cb = None\n self._updating = False\n self._doc = None\n\n @param.depends('running', watch=True)\n def _start(self):\n if not self.running or self._updating:\n return\n self.start()\n\n @param.depends('running', watch=True)\n def _stop(self):\n if self.running or self._updating:\n return\n self.stop()\n\n @param.depends('period', watch=True)\n def _update_period(self):\n if self._cb:\n self.stop()\n self.start()\n\n def _periodic_callback(self):\n with edit_readonly(state):\n state.busy = True\n try:\n self.callback()\n finally:\n with edit_readonly(state):\n state.busy = False\n self._counter += 1\n if self.timeout is not None:\n dt = (time.time() - self._start_time) * 1000\n if dt > self.timeout:\n self.stop()\n if self._counter == self.count:\n self.stop()\n\n @property\n def counter(self):\n \"\"\"\n Returns the execution count of the periodic callback.\n \"\"\"\n return self._counter\n\n def _cleanup(self, session_context):\n self.stop()\n\n def start(self):\n \"\"\"\n Starts running the periodic callback.\n \"\"\"\n if self._cb is not None:\n raise RuntimeError('Periodic callback has already started.')\n if not self.running:\n try:\n self._updating = True\n self.running = True\n finally:\n self._updating = False\n self._start_time = time.time()\n if state.curdoc:\n self._doc = state.curdoc\n self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)\n else:\n from tornado.ioloop import PeriodicCallback\n self._cb = PeriodicCallback(self._periodic_callback, self.period)\n self._cb.start()\n try:\n state.on_session_destroyed(self._cleanup)\n except Exception:\n pass\n\n def stop(self):\n \"\"\"\n Stops running the periodic callback.\n \"\"\"\n if self.running:\n try:\n self._updating = True\n self.running = False\n finally:\n self._updating = False\n self._counter = 0\n self._timeout = None\n if self._doc:\n self._doc.remove_periodic_callback(self._cb)\n elif self._cb:\n self._cb.stop()\n self._cb = None\n doc = self._doc or _curdoc()\n if doc:\n doc.session_destroyed_callbacks = {\n cb for cb in doc.session_destroyed_callbacks\n if cb is not self._cleanup\n }\n self._doc = None\n", "path": "panel/io/callbacks.py"}]}
| 2,285 | 110 |
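A short note on the fix in the row above: `PeriodicCallback.stop()` assumed `self._cb` was always set, but when the callback was never started (or was already torn down) `_cb` is still `None`, hence the `AttributeError` in the traceback. The sketch below reduces the pattern to its essentials; the class name is made up and the surrounding machinery is omitted.

```python
# Reduced sketch of the guard introduced by the golden diff above.
class PeriodicCallbackSketch:
    def __init__(self):
        self._cb = None    # created lazily by start(); may still be None
        self._doc = None

    def stop(self):
        if self._doc:
            self._doc.remove_periodic_callback(self._cb)
        elif self._cb:     # the fix: only call .stop() if a callback exists
            self._cb.stop()
        self._cb = None


PeriodicCallbackSketch().stop()    # no longer raises AttributeError
```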
gh_patches_debug_18702
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1951
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add time to export filename
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/__init__.py`
Content:
```
1 import csv
2 import datetime
3 import os
4 from io import BytesIO, StringIO
5
6 from flask import Blueprint, abort
7 from flask import current_app as app
8 from flask import (
9 redirect,
10 render_template,
11 render_template_string,
12 request,
13 send_file,
14 url_for,
15 )
16
17 admin = Blueprint("admin", __name__)
18
19 # isort:imports-firstparty
20 from CTFd.admin import challenges # noqa: F401
21 from CTFd.admin import notifications # noqa: F401
22 from CTFd.admin import pages # noqa: F401
23 from CTFd.admin import scoreboard # noqa: F401
24 from CTFd.admin import statistics # noqa: F401
25 from CTFd.admin import submissions # noqa: F401
26 from CTFd.admin import teams # noqa: F401
27 from CTFd.admin import users # noqa: F401
28 from CTFd.cache import cache, clear_config, clear_pages, clear_standings
29 from CTFd.models import (
30 Awards,
31 Challenges,
32 Configs,
33 Notifications,
34 Pages,
35 Solves,
36 Submissions,
37 Teams,
38 Tracking,
39 Unlocks,
40 Users,
41 db,
42 get_class_by_tablename,
43 )
44 from CTFd.utils import config as ctf_config
45 from CTFd.utils import get_config, set_config
46 from CTFd.utils.csv import load_challenges_csv, load_teams_csv, load_users_csv
47 from CTFd.utils.decorators import admins_only
48 from CTFd.utils.exports import export_ctf as export_ctf_util
49 from CTFd.utils.exports import import_ctf as import_ctf_util
50 from CTFd.utils.helpers import get_errors
51 from CTFd.utils.security.auth import logout_user
52 from CTFd.utils.uploads import delete_file
53 from CTFd.utils.user import is_admin
54
55
56 @admin.route("/admin", methods=["GET"])
57 def view():
58 if is_admin():
59 return redirect(url_for("admin.statistics"))
60 return redirect(url_for("auth.login"))
61
62
63 @admin.route("/admin/plugins/<plugin>", methods=["GET", "POST"])
64 @admins_only
65 def plugin(plugin):
66 if request.method == "GET":
67 plugins_path = os.path.join(app.root_path, "plugins")
68
69 config_html_plugins = [
70 name
71 for name in os.listdir(plugins_path)
72 if os.path.isfile(os.path.join(plugins_path, name, "config.html"))
73 ]
74
75 if plugin in config_html_plugins:
76 config_html = open(
77 os.path.join(app.root_path, "plugins", plugin, "config.html")
78 ).read()
79 return render_template_string(config_html)
80 abort(404)
81 elif request.method == "POST":
82 for k, v in request.form.items():
83 if k == "nonce":
84 continue
85 set_config(k, v)
86 with app.app_context():
87 clear_config()
88 return "1"
89
90
91 @admin.route("/admin/import", methods=["POST"])
92 @admins_only
93 def import_ctf():
94 backup = request.files["backup"]
95 errors = get_errors()
96 try:
97 import_ctf_util(backup)
98 except Exception as e:
99 print(e)
100 errors.append(repr(e))
101
102 if errors:
103 return errors[0], 500
104 else:
105 return redirect(url_for("admin.config"))
106
107
108 @admin.route("/admin/export", methods=["GET", "POST"])
109 @admins_only
110 def export_ctf():
111 backup = export_ctf_util()
112 ctf_name = ctf_config.ctf_name()
113 day = datetime.datetime.now().strftime("%Y-%m-%d")
114 full_name = u"{}.{}.zip".format(ctf_name, day)
115 return send_file(
116 backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name
117 )
118
119
120 @admin.route("/admin/import/csv", methods=["POST"])
121 @admins_only
122 def import_csv():
123 csv_type = request.form["csv_type"]
124 # Try really hard to load data in properly no matter what nonsense Excel gave you
125 raw = request.files["csv_file"].stream.read()
126 try:
127 csvdata = raw.decode("utf-8-sig")
128 except UnicodeDecodeError:
129 try:
130 csvdata = raw.decode("cp1252")
131 except UnicodeDecodeError:
132 csvdata = raw.decode("latin-1")
133 csvfile = StringIO(csvdata)
134
135 loaders = {
136 "challenges": load_challenges_csv,
137 "users": load_users_csv,
138 "teams": load_teams_csv,
139 }
140
141 loader = loaders[csv_type]
142 reader = csv.DictReader(csvfile)
143 loader(reader)
144 return redirect(url_for("admin.config"))
145
146
147 @admin.route("/admin/export/csv")
148 @admins_only
149 def export_csv():
150 table = request.args.get("table")
151
152 # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.
153 model = get_class_by_tablename(table)
154 if model is None:
155 abort(404)
156
157 temp = StringIO()
158 writer = csv.writer(temp)
159
160 header = [column.name for column in model.__mapper__.columns]
161 writer.writerow(header)
162
163 responses = model.query.all()
164
165 for curr in responses:
166 writer.writerow(
167 [getattr(curr, column.name) for column in model.__mapper__.columns]
168 )
169
170 temp.seek(0)
171
172 # In Python 3 send_file requires bytes
173 output = BytesIO()
174 output.write(temp.getvalue().encode("utf-8"))
175 output.seek(0)
176 temp.close()
177
178 return send_file(
179 output,
180 as_attachment=True,
181 cache_timeout=-1,
182 attachment_filename="{name}-{table}.csv".format(
183 name=ctf_config.ctf_name(), table=table
184 ),
185 )
186
187
188 @admin.route("/admin/config", methods=["GET", "POST"])
189 @admins_only
190 def config():
191 # Clear the config cache so that we don't get stale values
192 clear_config()
193
194 configs = Configs.query.all()
195 configs = {c.key: get_config(c.key) for c in configs}
196
197 themes = ctf_config.get_themes()
198 themes.remove(get_config("ctf_theme"))
199
200 return render_template("admin/config.html", themes=themes, **configs)
201
202
203 @admin.route("/admin/reset", methods=["GET", "POST"])
204 @admins_only
205 def reset():
206 if request.method == "POST":
207 require_setup = False
208 logout = False
209 next_url = url_for("admin.statistics")
210
211 data = request.form
212
213 if data.get("pages"):
214 _pages = Pages.query.all()
215 for p in _pages:
216 for f in p.files:
217 delete_file(file_id=f.id)
218
219 Pages.query.delete()
220
221 if data.get("notifications"):
222 Notifications.query.delete()
223
224 if data.get("challenges"):
225 _challenges = Challenges.query.all()
226 for c in _challenges:
227 for f in c.files:
228 delete_file(file_id=f.id)
229 Challenges.query.delete()
230
231 if data.get("accounts"):
232 Users.query.delete()
233 Teams.query.delete()
234 require_setup = True
235 logout = True
236
237 if data.get("submissions"):
238 Solves.query.delete()
239 Submissions.query.delete()
240 Awards.query.delete()
241 Unlocks.query.delete()
242 Tracking.query.delete()
243
244 if require_setup:
245 set_config("setup", False)
246 cache.clear()
247 logout_user()
248 next_url = url_for("views.setup")
249
250 db.session.commit()
251
252 clear_pages()
253 clear_standings()
254 clear_config()
255
256 if logout is True:
257 cache.clear()
258 logout_user()
259
260 db.session.close()
261 return redirect(next_url)
262
263 return render_template("admin/reset.html")
264
```
Path: `manage.py`
Content:
```
1 import datetime
2 import shutil
3
4 from flask_migrate import MigrateCommand
5 from flask_script import Manager
6
7 from CTFd import create_app
8 from CTFd.utils import get_config as get_config_util
9 from CTFd.utils import set_config as set_config_util
10 from CTFd.utils.config import ctf_name
11 from CTFd.utils.exports import export_ctf as export_ctf_util
12 from CTFd.utils.exports import import_ctf as import_ctf_util
13
14 app = create_app()
15
16 manager = Manager(app)
17 manager.add_command("db", MigrateCommand)
18
19
20 def jsenums():
21 from CTFd.constants import JS_ENUMS
22 import json
23 import os
24
25 path = os.path.join(app.root_path, "themes/core/assets/js/constants.js")
26
27 with open(path, "w+") as f:
28 for k, v in JS_ENUMS.items():
29 f.write("const {} = Object.freeze({});".format(k, json.dumps(v)))
30
31
32 BUILD_COMMANDS = {"jsenums": jsenums}
33
34
35 @manager.command
36 def get_config(key):
37 with app.app_context():
38 print(get_config_util(key))
39
40
41 @manager.command
42 def set_config(key, value):
43 with app.app_context():
44 print(set_config_util(key, value).value)
45
46
47 @manager.command
48 def build(cmd):
49 with app.app_context():
50 cmd = BUILD_COMMANDS.get(cmd)
51 cmd()
52
53
54 @manager.command
55 def export_ctf(path=None):
56 with app.app_context():
57 backup = export_ctf_util()
58
59 if path:
60 with open(path, "wb") as target:
61 shutil.copyfileobj(backup, target)
62 else:
63 name = ctf_name()
64 day = datetime.datetime.now().strftime("%Y-%m-%d")
65 full_name = f"{name}.{day}.zip"
66
67 with open(full_name, "wb") as target:
68 shutil.copyfileobj(backup, target)
69
70 print(f"Exported {full_name}")
71
72
73 @manager.command
74 def import_ctf(path):
75 with app.app_context():
76 import_ctf_util(path)
77
78
79 if __name__ == "__main__":
80 manager.run()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py
--- a/CTFd/admin/__init__.py
+++ b/CTFd/admin/__init__.py
@@ -110,7 +110,7 @@
def export_ctf():
backup = export_ctf_util()
ctf_name = ctf_config.ctf_name()
- day = datetime.datetime.now().strftime("%Y-%m-%d")
+ day = datetime.datetime.now().strftime("%Y-%m-%d_%T")
full_name = u"{}.{}.zip".format(ctf_name, day)
return send_file(
backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name
diff --git a/manage.py b/manage.py
--- a/manage.py
+++ b/manage.py
@@ -61,7 +61,7 @@
shutil.copyfileobj(backup, target)
else:
name = ctf_name()
- day = datetime.datetime.now().strftime("%Y-%m-%d")
+ day = datetime.datetime.now().strftime("%Y-%m-%d_%T")
full_name = f"{name}.{day}.zip"
with open(full_name, "wb") as target:
|
{"golden_diff": "diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py\n--- a/CTFd/admin/__init__.py\n+++ b/CTFd/admin/__init__.py\n@@ -110,7 +110,7 @@\n def export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n- day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n+ day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\ndiff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -61,7 +61,7 @@\n shutil.copyfileobj(backup, target)\n else:\n name = ctf_name()\n- day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n+ day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = f\"{name}.{day}.zip\"\n \n with open(full_name, \"wb\") as target:\n", "issue": "Add time to export filename\n\n", "before_files": [{"content": "import csv\nimport datetime\nimport os\nfrom io import BytesIO, StringIO\n\nfrom flask import Blueprint, abort\nfrom flask import current_app as app\nfrom flask import (\n redirect,\n render_template,\n render_template_string,\n request,\n send_file,\n url_for,\n)\n\nadmin = Blueprint(\"admin\", __name__)\n\n# isort:imports-firstparty\nfrom CTFd.admin import challenges # noqa: F401\nfrom CTFd.admin import notifications # noqa: F401\nfrom CTFd.admin import pages # noqa: F401\nfrom CTFd.admin import scoreboard # noqa: F401\nfrom CTFd.admin import statistics # noqa: F401\nfrom CTFd.admin import submissions # noqa: F401\nfrom CTFd.admin import teams # noqa: F401\nfrom CTFd.admin import users # noqa: F401\nfrom CTFd.cache import cache, clear_config, clear_pages, clear_standings\nfrom CTFd.models import (\n Awards,\n Challenges,\n Configs,\n Notifications,\n Pages,\n Solves,\n Submissions,\n Teams,\n Tracking,\n Unlocks,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.utils import config as ctf_config\nfrom CTFd.utils import get_config, set_config\nfrom CTFd.utils.csv import load_challenges_csv, load_teams_csv, load_users_csv\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.exports import import_ctf as import_ctf_util\nfrom CTFd.utils.helpers import get_errors\nfrom CTFd.utils.security.auth import logout_user\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import is_admin\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method == \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"POST\"])\n@admins_only\ndef import_ctf():\n backup = request.files[\"backup\"]\n errors = get_errors()\n try:\n import_ctf_util(backup)\n except Exception as e:\n 
print(e)\n errors.append(repr(e))\n\n if errors:\n return errors[0], 500\n else:\n return redirect(url_for(\"admin.config\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n )\n\n\[email protected](\"/admin/import/csv\", methods=[\"POST\"])\n@admins_only\ndef import_csv():\n csv_type = request.form[\"csv_type\"]\n # Try really hard to load data in properly no matter what nonsense Excel gave you\n raw = request.files[\"csv_file\"].stream.read()\n try:\n csvdata = raw.decode(\"utf-8-sig\")\n except UnicodeDecodeError:\n try:\n csvdata = raw.decode(\"cp1252\")\n except UnicodeDecodeError:\n csvdata = raw.decode(\"latin-1\")\n csvfile = StringIO(csvdata)\n\n loaders = {\n \"challenges\": load_challenges_csv,\n \"users\": load_users_csv,\n \"teams\": load_teams_csv,\n }\n\n loader = loaders[csv_type]\n reader = csv.DictReader(csvfile)\n loader(reader)\n return redirect(url_for(\"admin.config\"))\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.\n model = get_class_by_tablename(table)\n if model is None:\n abort(404)\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return send_file(\n output,\n as_attachment=True,\n cache_timeout=-1,\n attachment_filename=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n configs = Configs.query.all()\n configs = {c.key: get_config(c.key) for c in configs}\n\n themes = ctf_config.get_themes()\n themes.remove(get_config(\"ctf_theme\"))\n\n return render_template(\"admin/config.html\", themes=themes, **configs)\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n require_setup = False\n logout = False\n next_url = url_for(\"admin.statistics\")\n\n data = request.form\n\n if data.get(\"pages\"):\n _pages = Pages.query.all()\n for p in _pages:\n for f in p.files:\n delete_file(file_id=f.id)\n\n Pages.query.delete()\n\n if data.get(\"notifications\"):\n Notifications.query.delete()\n\n if data.get(\"challenges\"):\n _challenges = Challenges.query.all()\n for c in _challenges:\n for f in c.files:\n delete_file(file_id=f.id)\n Challenges.query.delete()\n\n if data.get(\"accounts\"):\n Users.query.delete()\n Teams.query.delete()\n require_setup = True\n logout = True\n\n if data.get(\"submissions\"):\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Tracking.query.delete()\n\n if require_setup:\n set_config(\"setup\", False)\n 
cache.clear()\n logout_user()\n next_url = url_for(\"views.setup\")\n\n db.session.commit()\n\n clear_pages()\n clear_standings()\n clear_config()\n\n if logout is True:\n cache.clear()\n logout_user()\n\n db.session.close()\n return redirect(next_url)\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}, {"content": "import datetime\nimport shutil\n\nfrom flask_migrate import MigrateCommand\nfrom flask_script import Manager\n\nfrom CTFd import create_app\nfrom CTFd.utils import get_config as get_config_util\nfrom CTFd.utils import set_config as set_config_util\nfrom CTFd.utils.config import ctf_name\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.exports import import_ctf as import_ctf_util\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command(\"db\", MigrateCommand)\n\n\ndef jsenums():\n from CTFd.constants import JS_ENUMS\n import json\n import os\n\n path = os.path.join(app.root_path, \"themes/core/assets/js/constants.js\")\n\n with open(path, \"w+\") as f:\n for k, v in JS_ENUMS.items():\n f.write(\"const {} = Object.freeze({});\".format(k, json.dumps(v)))\n\n\nBUILD_COMMANDS = {\"jsenums\": jsenums}\n\n\[email protected]\ndef get_config(key):\n with app.app_context():\n print(get_config_util(key))\n\n\[email protected]\ndef set_config(key, value):\n with app.app_context():\n print(set_config_util(key, value).value)\n\n\[email protected]\ndef build(cmd):\n with app.app_context():\n cmd = BUILD_COMMANDS.get(cmd)\n cmd()\n\n\[email protected]\ndef export_ctf(path=None):\n with app.app_context():\n backup = export_ctf_util()\n\n if path:\n with open(path, \"wb\") as target:\n shutil.copyfileobj(backup, target)\n else:\n name = ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n full_name = f\"{name}.{day}.zip\"\n\n with open(full_name, \"wb\") as target:\n shutil.copyfileobj(backup, target)\n\n print(f\"Exported {full_name}\")\n\n\[email protected]\ndef import_ctf(path):\n with app.app_context():\n import_ctf_util(path)\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}], "after_files": [{"content": "import csv\nimport datetime\nimport os\nfrom io import BytesIO, StringIO\n\nfrom flask import Blueprint, abort\nfrom flask import current_app as app\nfrom flask import (\n redirect,\n render_template,\n render_template_string,\n request,\n send_file,\n url_for,\n)\n\nadmin = Blueprint(\"admin\", __name__)\n\n# isort:imports-firstparty\nfrom CTFd.admin import challenges # noqa: F401\nfrom CTFd.admin import notifications # noqa: F401\nfrom CTFd.admin import pages # noqa: F401\nfrom CTFd.admin import scoreboard # noqa: F401\nfrom CTFd.admin import statistics # noqa: F401\nfrom CTFd.admin import submissions # noqa: F401\nfrom CTFd.admin import teams # noqa: F401\nfrom CTFd.admin import users # noqa: F401\nfrom CTFd.cache import cache, clear_config, clear_pages, clear_standings\nfrom CTFd.models import (\n Awards,\n Challenges,\n Configs,\n Notifications,\n Pages,\n Solves,\n Submissions,\n Teams,\n Tracking,\n Unlocks,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.utils import config as ctf_config\nfrom CTFd.utils import get_config, set_config\nfrom CTFd.utils.csv import load_challenges_csv, load_teams_csv, load_users_csv\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.exports import import_ctf as import_ctf_util\nfrom CTFd.utils.helpers import get_errors\nfrom CTFd.utils.security.auth import 
logout_user\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import is_admin\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method == \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"POST\"])\n@admins_only\ndef import_ctf():\n backup = request.files[\"backup\"]\n errors = get_errors()\n try:\n import_ctf_util(backup)\n except Exception as e:\n print(e)\n errors.append(repr(e))\n\n if errors:\n return errors[0], 500\n else:\n return redirect(url_for(\"admin.config\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n )\n\n\[email protected](\"/admin/import/csv\", methods=[\"POST\"])\n@admins_only\ndef import_csv():\n csv_type = request.form[\"csv_type\"]\n # Try really hard to load data in properly no matter what nonsense Excel gave you\n raw = request.files[\"csv_file\"].stream.read()\n try:\n csvdata = raw.decode(\"utf-8-sig\")\n except UnicodeDecodeError:\n try:\n csvdata = raw.decode(\"cp1252\")\n except UnicodeDecodeError:\n csvdata = raw.decode(\"latin-1\")\n csvfile = StringIO(csvdata)\n\n loaders = {\n \"challenges\": load_challenges_csv,\n \"users\": load_users_csv,\n \"teams\": load_teams_csv,\n }\n\n loader = loaders[csv_type]\n reader = csv.DictReader(csvfile)\n loader(reader)\n return redirect(url_for(\"admin.config\"))\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n # TODO: It might make sense to limit dumpable tables. 
Config could potentially leak sensitive information.\n model = get_class_by_tablename(table)\n if model is None:\n abort(404)\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return send_file(\n output,\n as_attachment=True,\n cache_timeout=-1,\n attachment_filename=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n configs = Configs.query.all()\n configs = {c.key: get_config(c.key) for c in configs}\n\n themes = ctf_config.get_themes()\n themes.remove(get_config(\"ctf_theme\"))\n\n return render_template(\"admin/config.html\", themes=themes, **configs)\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n require_setup = False\n logout = False\n next_url = url_for(\"admin.statistics\")\n\n data = request.form\n\n if data.get(\"pages\"):\n _pages = Pages.query.all()\n for p in _pages:\n for f in p.files:\n delete_file(file_id=f.id)\n\n Pages.query.delete()\n\n if data.get(\"notifications\"):\n Notifications.query.delete()\n\n if data.get(\"challenges\"):\n _challenges = Challenges.query.all()\n for c in _challenges:\n for f in c.files:\n delete_file(file_id=f.id)\n Challenges.query.delete()\n\n if data.get(\"accounts\"):\n Users.query.delete()\n Teams.query.delete()\n require_setup = True\n logout = True\n\n if data.get(\"submissions\"):\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Tracking.query.delete()\n\n if require_setup:\n set_config(\"setup\", False)\n cache.clear()\n logout_user()\n next_url = url_for(\"views.setup\")\n\n db.session.commit()\n\n clear_pages()\n clear_standings()\n clear_config()\n\n if logout is True:\n cache.clear()\n logout_user()\n\n db.session.close()\n return redirect(next_url)\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}, {"content": "import datetime\nimport shutil\n\nfrom flask_migrate import MigrateCommand\nfrom flask_script import Manager\n\nfrom CTFd import create_app\nfrom CTFd.utils import get_config as get_config_util\nfrom CTFd.utils import set_config as set_config_util\nfrom CTFd.utils.config import ctf_name\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.exports import import_ctf as import_ctf_util\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command(\"db\", MigrateCommand)\n\n\ndef jsenums():\n from CTFd.constants import JS_ENUMS\n import json\n import os\n\n path = os.path.join(app.root_path, \"themes/core/assets/js/constants.js\")\n\n with open(path, \"w+\") as f:\n for k, v in JS_ENUMS.items():\n f.write(\"const {} = Object.freeze({});\".format(k, json.dumps(v)))\n\n\nBUILD_COMMANDS = {\"jsenums\": jsenums}\n\n\[email protected]\ndef get_config(key):\n with app.app_context():\n print(get_config_util(key))\n\n\[email protected]\ndef set_config(key, value):\n with app.app_context():\n print(set_config_util(key, 
value).value)\n\n\[email protected]\ndef build(cmd):\n with app.app_context():\n cmd = BUILD_COMMANDS.get(cmd)\n cmd()\n\n\[email protected]\ndef export_ctf(path=None):\n with app.app_context():\n backup = export_ctf_util()\n\n if path:\n with open(path, \"wb\") as target:\n shutil.copyfileobj(backup, target)\n else:\n name = ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = f\"{name}.{day}.zip\"\n\n with open(full_name, \"wb\") as target:\n shutil.copyfileobj(backup, target)\n\n print(f\"Exported {full_name}\")\n\n\[email protected]\ndef import_ctf(path):\n with app.app_context():\n import_ctf_util(path)\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}]}
| 3,265 | 268 |
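The CTFd patch above extends the export filename's timestamp from a date to a date plus time by switching the `strftime` format from `"%Y-%m-%d"` to `"%Y-%m-%d_%T"` (`%T` is shorthand for `%H:%M:%S` on platforms whose C strftime supports it). A minimal sketch of the resulting filename construction, with a made-up CTF name:

```python
# Illustrative only -- the CTF name is made up; the format string matches the patch.
import datetime

ctf_name = "example-ctf"
stamp = datetime.datetime.now().strftime("%Y-%m-%d_%T")  # %T == %H:%M:%S on POSIX strftime
full_name = f"{ctf_name}.{stamp}.zip"
print(full_name)   # e.g. example-ctf.2021-06-01_14:03:59.zip
```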
gh_patches_debug_16150
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1707
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add warning for `files` / `exclude` containing `/*`
it's a common mistake to believe that those fields are globs; they're not -- they're regular expressions. (and matching `/*` is nonsense, it matches `''`, `'/'` as well as `'//////////////////////////////////////////////////////'`)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/clientlib.py`
Content:
```
1 import argparse
2 import functools
3 import logging
4 import shlex
5 import sys
6 from typing import Any
7 from typing import Dict
8 from typing import Optional
9 from typing import Sequence
10
11 import cfgv
12 from identify.identify import ALL_TAGS
13
14 import pre_commit.constants as C
15 from pre_commit.color import add_color_option
16 from pre_commit.errors import FatalError
17 from pre_commit.languages.all import all_languages
18 from pre_commit.logging_handler import logging_handler
19 from pre_commit.util import parse_version
20 from pre_commit.util import yaml_load
21
22 logger = logging.getLogger('pre_commit')
23
24 check_string_regex = cfgv.check_and(cfgv.check_string, cfgv.check_regex)
25
26
27 def check_type_tag(tag: str) -> None:
28 if tag not in ALL_TAGS:
29 raise cfgv.ValidationError(
30 f'Type tag {tag!r} is not recognized. '
31 f'Try upgrading identify and pre-commit?',
32 )
33
34
35 def check_min_version(version: str) -> None:
36 if parse_version(version) > parse_version(C.VERSION):
37 raise cfgv.ValidationError(
38 f'pre-commit version {version} is required but version '
39 f'{C.VERSION} is installed. '
40 f'Perhaps run `pip install --upgrade pre-commit`.',
41 )
42
43
44 def _make_argparser(filenames_help: str) -> argparse.ArgumentParser:
45 parser = argparse.ArgumentParser()
46 parser.add_argument('filenames', nargs='*', help=filenames_help)
47 parser.add_argument('-V', '--version', action='version', version=C.VERSION)
48 add_color_option(parser)
49 return parser
50
51
52 MANIFEST_HOOK_DICT = cfgv.Map(
53 'Hook', 'id',
54
55 cfgv.Required('id', cfgv.check_string),
56 cfgv.Required('name', cfgv.check_string),
57 cfgv.Required('entry', cfgv.check_string),
58 cfgv.Required('language', cfgv.check_one_of(all_languages)),
59 cfgv.Optional('alias', cfgv.check_string, ''),
60
61 cfgv.Optional('files', check_string_regex, ''),
62 cfgv.Optional('exclude', check_string_regex, '^$'),
63 cfgv.Optional('types', cfgv.check_array(check_type_tag), ['file']),
64 cfgv.Optional('types_or', cfgv.check_array(check_type_tag), ['file']),
65 cfgv.Optional('exclude_types', cfgv.check_array(check_type_tag), []),
66
67 cfgv.Optional(
68 'additional_dependencies', cfgv.check_array(cfgv.check_string), [],
69 ),
70 cfgv.Optional('args', cfgv.check_array(cfgv.check_string), []),
71 cfgv.Optional('always_run', cfgv.check_bool, False),
72 cfgv.Optional('pass_filenames', cfgv.check_bool, True),
73 cfgv.Optional('description', cfgv.check_string, ''),
74 cfgv.Optional('language_version', cfgv.check_string, C.DEFAULT),
75 cfgv.Optional('log_file', cfgv.check_string, ''),
76 cfgv.Optional('minimum_pre_commit_version', cfgv.check_string, '0'),
77 cfgv.Optional('require_serial', cfgv.check_bool, False),
78 cfgv.Optional('stages', cfgv.check_array(cfgv.check_one_of(C.STAGES)), []),
79 cfgv.Optional('verbose', cfgv.check_bool, False),
80 )
81 MANIFEST_SCHEMA = cfgv.Array(MANIFEST_HOOK_DICT)
82
83
84 class InvalidManifestError(FatalError):
85 pass
86
87
88 load_manifest = functools.partial(
89 cfgv.load_from_filename,
90 schema=MANIFEST_SCHEMA,
91 load_strategy=yaml_load,
92 exc_tp=InvalidManifestError,
93 )
94
95
96 def validate_manifest_main(argv: Optional[Sequence[str]] = None) -> int:
97 parser = _make_argparser('Manifest filenames.')
98 args = parser.parse_args(argv)
99
100 with logging_handler(args.color):
101 ret = 0
102 for filename in args.filenames:
103 try:
104 load_manifest(filename)
105 except InvalidManifestError as e:
106 print(e)
107 ret = 1
108 return ret
109
110
111 LOCAL = 'local'
112 META = 'meta'
113
114
115 class MigrateShaToRev:
116 key = 'rev'
117
118 @staticmethod
119 def _cond(key: str) -> cfgv.Conditional:
120 return cfgv.Conditional(
121 key, cfgv.check_string,
122 condition_key='repo',
123 condition_value=cfgv.NotIn(LOCAL, META),
124 ensure_absent=True,
125 )
126
127 def check(self, dct: Dict[str, Any]) -> None:
128 if dct.get('repo') in {LOCAL, META}:
129 self._cond('rev').check(dct)
130 self._cond('sha').check(dct)
131 elif 'sha' in dct and 'rev' in dct:
132 raise cfgv.ValidationError('Cannot specify both sha and rev')
133 elif 'sha' in dct:
134 self._cond('sha').check(dct)
135 else:
136 self._cond('rev').check(dct)
137
138 def apply_default(self, dct: Dict[str, Any]) -> None:
139 if 'sha' in dct:
140 dct['rev'] = dct.pop('sha')
141
142 remove_default = cfgv.Required.remove_default
143
144
145 def _entry(modname: str) -> str:
146 """the hook `entry` is passed through `shlex.split()` by the command
147 runner, so to prevent issues with spaces and backslashes (on Windows)
148 it must be quoted here.
149 """
150 return f'{shlex.quote(sys.executable)} -m pre_commit.meta_hooks.{modname}'
151
152
153 def warn_unknown_keys_root(
154 extra: Sequence[str],
155 orig_keys: Sequence[str],
156 dct: Dict[str, str],
157 ) -> None:
158 logger.warning(f'Unexpected key(s) present at root: {", ".join(extra)}')
159
160
161 def warn_unknown_keys_repo(
162 extra: Sequence[str],
163 orig_keys: Sequence[str],
164 dct: Dict[str, str],
165 ) -> None:
166 logger.warning(
167 f'Unexpected key(s) present on {dct["repo"]}: {", ".join(extra)}',
168 )
169
170
171 _meta = (
172 (
173 'check-hooks-apply', (
174 ('name', 'Check hooks apply to the repository'),
175 ('files', C.CONFIG_FILE),
176 ('entry', _entry('check_hooks_apply')),
177 ),
178 ),
179 (
180 'check-useless-excludes', (
181 ('name', 'Check for useless excludes'),
182 ('files', C.CONFIG_FILE),
183 ('entry', _entry('check_useless_excludes')),
184 ),
185 ),
186 (
187 'identity', (
188 ('name', 'identity'),
189 ('verbose', True),
190 ('entry', _entry('identity')),
191 ),
192 ),
193 )
194
195 META_HOOK_DICT = cfgv.Map(
196 'Hook', 'id',
197 cfgv.Required('id', cfgv.check_string),
198 cfgv.Required('id', cfgv.check_one_of(tuple(k for k, _ in _meta))),
199 # language must be system
200 cfgv.Optional('language', cfgv.check_one_of({'system'}), 'system'),
201 *(
202 # default to the hook definition for the meta hooks
203 cfgv.ConditionalOptional(key, cfgv.check_any, value, 'id', hook_id)
204 for hook_id, values in _meta
205 for key, value in values
206 ),
207 *(
208 # default to the "manifest" parsing
209 cfgv.OptionalNoDefault(item.key, item.check_fn)
210 # these will always be defaulted above
211 if item.key in {'name', 'language', 'entry'} else
212 item
213 for item in MANIFEST_HOOK_DICT.items
214 ),
215 )
216 CONFIG_HOOK_DICT = cfgv.Map(
217 'Hook', 'id',
218
219 cfgv.Required('id', cfgv.check_string),
220
221 # All keys in manifest hook dict are valid in a config hook dict, but
222 # are optional.
223 # No defaults are provided here as the config is merged on top of the
224 # manifest.
225 *(
226 cfgv.OptionalNoDefault(item.key, item.check_fn)
227 for item in MANIFEST_HOOK_DICT.items
228 if item.key != 'id'
229 ),
230 )
231 CONFIG_REPO_DICT = cfgv.Map(
232 'Repository', 'repo',
233
234 cfgv.Required('repo', cfgv.check_string),
235
236 cfgv.ConditionalRecurse(
237 'hooks', cfgv.Array(CONFIG_HOOK_DICT),
238 'repo', cfgv.NotIn(LOCAL, META),
239 ),
240 cfgv.ConditionalRecurse(
241 'hooks', cfgv.Array(MANIFEST_HOOK_DICT),
242 'repo', LOCAL,
243 ),
244 cfgv.ConditionalRecurse(
245 'hooks', cfgv.Array(META_HOOK_DICT),
246 'repo', META,
247 ),
248
249 MigrateShaToRev(),
250 cfgv.WarnAdditionalKeys(('repo', 'rev', 'hooks'), warn_unknown_keys_repo),
251 )
252 DEFAULT_LANGUAGE_VERSION = cfgv.Map(
253 'DefaultLanguageVersion', None,
254 cfgv.NoAdditionalKeys(all_languages),
255 *(cfgv.Optional(x, cfgv.check_string, C.DEFAULT) for x in all_languages),
256 )
257 CONFIG_SCHEMA = cfgv.Map(
258 'Config', None,
259
260 cfgv.RequiredRecurse('repos', cfgv.Array(CONFIG_REPO_DICT)),
261 cfgv.OptionalRecurse(
262 'default_language_version', DEFAULT_LANGUAGE_VERSION, {},
263 ),
264 cfgv.Optional(
265 'default_stages',
266 cfgv.check_array(cfgv.check_one_of(C.STAGES)),
267 C.STAGES,
268 ),
269 cfgv.Optional('files', check_string_regex, ''),
270 cfgv.Optional('exclude', check_string_regex, '^$'),
271 cfgv.Optional('fail_fast', cfgv.check_bool, False),
272 cfgv.Optional(
273 'minimum_pre_commit_version',
274 cfgv.check_and(cfgv.check_string, check_min_version),
275 '0',
276 ),
277 cfgv.WarnAdditionalKeys(
278 (
279 'repos',
280 'default_language_version',
281 'default_stages',
282 'files',
283 'exclude',
284 'fail_fast',
285 'minimum_pre_commit_version',
286 ),
287 warn_unknown_keys_root,
288 ),
289 )
290
291
292 class InvalidConfigError(FatalError):
293 pass
294
295
296 def ordered_load_normalize_legacy_config(contents: str) -> Dict[str, Any]:
297 data = yaml_load(contents)
298 if isinstance(data, list):
299 logger.warning(
300 'normalizing pre-commit configuration to a top-level map. '
301 'support for top level list will be removed in a future version. '
302 'run: `pre-commit migrate-config` to automatically fix this.',
303 )
304 return {'repos': data}
305 else:
306 return data
307
308
309 load_config = functools.partial(
310 cfgv.load_from_filename,
311 schema=CONFIG_SCHEMA,
312 load_strategy=ordered_load_normalize_legacy_config,
313 exc_tp=InvalidConfigError,
314 )
315
316
317 def validate_config_main(argv: Optional[Sequence[str]] = None) -> int:
318 parser = _make_argparser('Config filenames.')
319 args = parser.parse_args(argv)
320
321 with logging_handler(args.color):
322 ret = 0
323 for filename in args.filenames:
324 try:
325 load_config(filename)
326 except InvalidConfigError as e:
327 print(e)
328 ret = 1
329 return ret
330
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/clientlib.py b/pre_commit/clientlib.py
--- a/pre_commit/clientlib.py
+++ b/pre_commit/clientlib.py
@@ -112,6 +112,18 @@
META = 'meta'
+class OptionalSensibleRegex(cfgv.OptionalNoDefault):
+ def check(self, dct: Dict[str, Any]) -> None:
+ super().check(dct)
+
+ if '/*' in dct.get(self.key, ''):
+ logger.warning(
+ f'The {self.key!r} field in hook {dct.get("id")!r} is a '
+ f"regex, not a glob -- matching '/*' probably isn't what you "
+ f'want here',
+ )
+
+
class MigrateShaToRev:
key = 'rev'
@@ -227,6 +239,8 @@
for item in MANIFEST_HOOK_DICT.items
if item.key != 'id'
),
+ OptionalSensibleRegex('files', cfgv.check_string),
+ OptionalSensibleRegex('exclude', cfgv.check_string),
)
CONFIG_REPO_DICT = cfgv.Map(
'Repository', 'repo',
|
{"golden_diff": "diff --git a/pre_commit/clientlib.py b/pre_commit/clientlib.py\n--- a/pre_commit/clientlib.py\n+++ b/pre_commit/clientlib.py\n@@ -112,6 +112,18 @@\n META = 'meta'\n \n \n+class OptionalSensibleRegex(cfgv.OptionalNoDefault):\n+ def check(self, dct: Dict[str, Any]) -> None:\n+ super().check(dct)\n+\n+ if '/*' in dct.get(self.key, ''):\n+ logger.warning(\n+ f'The {self.key!r} field in hook {dct.get(\"id\")!r} is a '\n+ f\"regex, not a glob -- matching '/*' probably isn't what you \"\n+ f'want here',\n+ )\n+\n+\n class MigrateShaToRev:\n key = 'rev'\n \n@@ -227,6 +239,8 @@\n for item in MANIFEST_HOOK_DICT.items\n if item.key != 'id'\n ),\n+ OptionalSensibleRegex('files', cfgv.check_string),\n+ OptionalSensibleRegex('exclude', cfgv.check_string),\n )\n CONFIG_REPO_DICT = cfgv.Map(\n 'Repository', 'repo',\n", "issue": "Add warning for `files` / `exclude` containing `/*`\nit's a common mistake to believe that those fields are globs, they're not -- they're regular expressions. (and matching `/*` is nonsense, it matches `''`, `'/'` as well as `'//////////////////////////////////////////////////////'`\n", "before_files": [{"content": "import argparse\nimport functools\nimport logging\nimport shlex\nimport sys\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Sequence\n\nimport cfgv\nfrom identify.identify import ALL_TAGS\n\nimport pre_commit.constants as C\nfrom pre_commit.color import add_color_option\nfrom pre_commit.errors import FatalError\nfrom pre_commit.languages.all import all_languages\nfrom pre_commit.logging_handler import logging_handler\nfrom pre_commit.util import parse_version\nfrom pre_commit.util import yaml_load\n\nlogger = logging.getLogger('pre_commit')\n\ncheck_string_regex = cfgv.check_and(cfgv.check_string, cfgv.check_regex)\n\n\ndef check_type_tag(tag: str) -> None:\n if tag not in ALL_TAGS:\n raise cfgv.ValidationError(\n f'Type tag {tag!r} is not recognized. '\n f'Try upgrading identify and pre-commit?',\n )\n\n\ndef check_min_version(version: str) -> None:\n if parse_version(version) > parse_version(C.VERSION):\n raise cfgv.ValidationError(\n f'pre-commit version {version} is required but version '\n f'{C.VERSION} is installed. 
'\n f'Perhaps run `pip install --upgrade pre-commit`.',\n )\n\n\ndef _make_argparser(filenames_help: str) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', help=filenames_help)\n parser.add_argument('-V', '--version', action='version', version=C.VERSION)\n add_color_option(parser)\n return parser\n\n\nMANIFEST_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n\n cfgv.Required('id', cfgv.check_string),\n cfgv.Required('name', cfgv.check_string),\n cfgv.Required('entry', cfgv.check_string),\n cfgv.Required('language', cfgv.check_one_of(all_languages)),\n cfgv.Optional('alias', cfgv.check_string, ''),\n\n cfgv.Optional('files', check_string_regex, ''),\n cfgv.Optional('exclude', check_string_regex, '^$'),\n cfgv.Optional('types', cfgv.check_array(check_type_tag), ['file']),\n cfgv.Optional('types_or', cfgv.check_array(check_type_tag), ['file']),\n cfgv.Optional('exclude_types', cfgv.check_array(check_type_tag), []),\n\n cfgv.Optional(\n 'additional_dependencies', cfgv.check_array(cfgv.check_string), [],\n ),\n cfgv.Optional('args', cfgv.check_array(cfgv.check_string), []),\n cfgv.Optional('always_run', cfgv.check_bool, False),\n cfgv.Optional('pass_filenames', cfgv.check_bool, True),\n cfgv.Optional('description', cfgv.check_string, ''),\n cfgv.Optional('language_version', cfgv.check_string, C.DEFAULT),\n cfgv.Optional('log_file', cfgv.check_string, ''),\n cfgv.Optional('minimum_pre_commit_version', cfgv.check_string, '0'),\n cfgv.Optional('require_serial', cfgv.check_bool, False),\n cfgv.Optional('stages', cfgv.check_array(cfgv.check_one_of(C.STAGES)), []),\n cfgv.Optional('verbose', cfgv.check_bool, False),\n)\nMANIFEST_SCHEMA = cfgv.Array(MANIFEST_HOOK_DICT)\n\n\nclass InvalidManifestError(FatalError):\n pass\n\n\nload_manifest = functools.partial(\n cfgv.load_from_filename,\n schema=MANIFEST_SCHEMA,\n load_strategy=yaml_load,\n exc_tp=InvalidManifestError,\n)\n\n\ndef validate_manifest_main(argv: Optional[Sequence[str]] = None) -> int:\n parser = _make_argparser('Manifest filenames.')\n args = parser.parse_args(argv)\n\n with logging_handler(args.color):\n ret = 0\n for filename in args.filenames:\n try:\n load_manifest(filename)\n except InvalidManifestError as e:\n print(e)\n ret = 1\n return ret\n\n\nLOCAL = 'local'\nMETA = 'meta'\n\n\nclass MigrateShaToRev:\n key = 'rev'\n\n @staticmethod\n def _cond(key: str) -> cfgv.Conditional:\n return cfgv.Conditional(\n key, cfgv.check_string,\n condition_key='repo',\n condition_value=cfgv.NotIn(LOCAL, META),\n ensure_absent=True,\n )\n\n def check(self, dct: Dict[str, Any]) -> None:\n if dct.get('repo') in {LOCAL, META}:\n self._cond('rev').check(dct)\n self._cond('sha').check(dct)\n elif 'sha' in dct and 'rev' in dct:\n raise cfgv.ValidationError('Cannot specify both sha and rev')\n elif 'sha' in dct:\n self._cond('sha').check(dct)\n else:\n self._cond('rev').check(dct)\n\n def apply_default(self, dct: Dict[str, Any]) -> None:\n if 'sha' in dct:\n dct['rev'] = dct.pop('sha')\n\n remove_default = cfgv.Required.remove_default\n\n\ndef _entry(modname: str) -> str:\n \"\"\"the hook `entry` is passed through `shlex.split()` by the command\n runner, so to prevent issues with spaces and backslashes (on Windows)\n it must be quoted here.\n \"\"\"\n return f'{shlex.quote(sys.executable)} -m pre_commit.meta_hooks.{modname}'\n\n\ndef warn_unknown_keys_root(\n extra: Sequence[str],\n orig_keys: Sequence[str],\n dct: Dict[str, str],\n) -> None:\n logger.warning(f'Unexpected key(s) present at root: {\", 
\".join(extra)}')\n\n\ndef warn_unknown_keys_repo(\n extra: Sequence[str],\n orig_keys: Sequence[str],\n dct: Dict[str, str],\n) -> None:\n logger.warning(\n f'Unexpected key(s) present on {dct[\"repo\"]}: {\", \".join(extra)}',\n )\n\n\n_meta = (\n (\n 'check-hooks-apply', (\n ('name', 'Check hooks apply to the repository'),\n ('files', C.CONFIG_FILE),\n ('entry', _entry('check_hooks_apply')),\n ),\n ),\n (\n 'check-useless-excludes', (\n ('name', 'Check for useless excludes'),\n ('files', C.CONFIG_FILE),\n ('entry', _entry('check_useless_excludes')),\n ),\n ),\n (\n 'identity', (\n ('name', 'identity'),\n ('verbose', True),\n ('entry', _entry('identity')),\n ),\n ),\n)\n\nMETA_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n cfgv.Required('id', cfgv.check_string),\n cfgv.Required('id', cfgv.check_one_of(tuple(k for k, _ in _meta))),\n # language must be system\n cfgv.Optional('language', cfgv.check_one_of({'system'}), 'system'),\n *(\n # default to the hook definition for the meta hooks\n cfgv.ConditionalOptional(key, cfgv.check_any, value, 'id', hook_id)\n for hook_id, values in _meta\n for key, value in values\n ),\n *(\n # default to the \"manifest\" parsing\n cfgv.OptionalNoDefault(item.key, item.check_fn)\n # these will always be defaulted above\n if item.key in {'name', 'language', 'entry'} else\n item\n for item in MANIFEST_HOOK_DICT.items\n ),\n)\nCONFIG_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n\n cfgv.Required('id', cfgv.check_string),\n\n # All keys in manifest hook dict are valid in a config hook dict, but\n # are optional.\n # No defaults are provided here as the config is merged on top of the\n # manifest.\n *(\n cfgv.OptionalNoDefault(item.key, item.check_fn)\n for item in MANIFEST_HOOK_DICT.items\n if item.key != 'id'\n ),\n)\nCONFIG_REPO_DICT = cfgv.Map(\n 'Repository', 'repo',\n\n cfgv.Required('repo', cfgv.check_string),\n\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(CONFIG_HOOK_DICT),\n 'repo', cfgv.NotIn(LOCAL, META),\n ),\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(MANIFEST_HOOK_DICT),\n 'repo', LOCAL,\n ),\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(META_HOOK_DICT),\n 'repo', META,\n ),\n\n MigrateShaToRev(),\n cfgv.WarnAdditionalKeys(('repo', 'rev', 'hooks'), warn_unknown_keys_repo),\n)\nDEFAULT_LANGUAGE_VERSION = cfgv.Map(\n 'DefaultLanguageVersion', None,\n cfgv.NoAdditionalKeys(all_languages),\n *(cfgv.Optional(x, cfgv.check_string, C.DEFAULT) for x in all_languages),\n)\nCONFIG_SCHEMA = cfgv.Map(\n 'Config', None,\n\n cfgv.RequiredRecurse('repos', cfgv.Array(CONFIG_REPO_DICT)),\n cfgv.OptionalRecurse(\n 'default_language_version', DEFAULT_LANGUAGE_VERSION, {},\n ),\n cfgv.Optional(\n 'default_stages',\n cfgv.check_array(cfgv.check_one_of(C.STAGES)),\n C.STAGES,\n ),\n cfgv.Optional('files', check_string_regex, ''),\n cfgv.Optional('exclude', check_string_regex, '^$'),\n cfgv.Optional('fail_fast', cfgv.check_bool, False),\n cfgv.Optional(\n 'minimum_pre_commit_version',\n cfgv.check_and(cfgv.check_string, check_min_version),\n '0',\n ),\n cfgv.WarnAdditionalKeys(\n (\n 'repos',\n 'default_language_version',\n 'default_stages',\n 'files',\n 'exclude',\n 'fail_fast',\n 'minimum_pre_commit_version',\n ),\n warn_unknown_keys_root,\n ),\n)\n\n\nclass InvalidConfigError(FatalError):\n pass\n\n\ndef ordered_load_normalize_legacy_config(contents: str) -> Dict[str, Any]:\n data = yaml_load(contents)\n if isinstance(data, list):\n logger.warning(\n 'normalizing pre-commit configuration to a top-level map. 
'\n 'support for top level list will be removed in a future version. '\n 'run: `pre-commit migrate-config` to automatically fix this.',\n )\n return {'repos': data}\n else:\n return data\n\n\nload_config = functools.partial(\n cfgv.load_from_filename,\n schema=CONFIG_SCHEMA,\n load_strategy=ordered_load_normalize_legacy_config,\n exc_tp=InvalidConfigError,\n)\n\n\ndef validate_config_main(argv: Optional[Sequence[str]] = None) -> int:\n parser = _make_argparser('Config filenames.')\n args = parser.parse_args(argv)\n\n with logging_handler(args.color):\n ret = 0\n for filename in args.filenames:\n try:\n load_config(filename)\n except InvalidConfigError as e:\n print(e)\n ret = 1\n return ret\n", "path": "pre_commit/clientlib.py"}], "after_files": [{"content": "import argparse\nimport functools\nimport logging\nimport shlex\nimport sys\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Sequence\n\nimport cfgv\nfrom identify.identify import ALL_TAGS\n\nimport pre_commit.constants as C\nfrom pre_commit.color import add_color_option\nfrom pre_commit.errors import FatalError\nfrom pre_commit.languages.all import all_languages\nfrom pre_commit.logging_handler import logging_handler\nfrom pre_commit.util import parse_version\nfrom pre_commit.util import yaml_load\n\nlogger = logging.getLogger('pre_commit')\n\ncheck_string_regex = cfgv.check_and(cfgv.check_string, cfgv.check_regex)\n\n\ndef check_type_tag(tag: str) -> None:\n if tag not in ALL_TAGS:\n raise cfgv.ValidationError(\n f'Type tag {tag!r} is not recognized. '\n f'Try upgrading identify and pre-commit?',\n )\n\n\ndef check_min_version(version: str) -> None:\n if parse_version(version) > parse_version(C.VERSION):\n raise cfgv.ValidationError(\n f'pre-commit version {version} is required but version '\n f'{C.VERSION} is installed. 
'\n f'Perhaps run `pip install --upgrade pre-commit`.',\n )\n\n\ndef _make_argparser(filenames_help: str) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', help=filenames_help)\n parser.add_argument('-V', '--version', action='version', version=C.VERSION)\n add_color_option(parser)\n return parser\n\n\nMANIFEST_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n\n cfgv.Required('id', cfgv.check_string),\n cfgv.Required('name', cfgv.check_string),\n cfgv.Required('entry', cfgv.check_string),\n cfgv.Required('language', cfgv.check_one_of(all_languages)),\n cfgv.Optional('alias', cfgv.check_string, ''),\n\n cfgv.Optional('files', check_string_regex, ''),\n cfgv.Optional('exclude', check_string_regex, '^$'),\n cfgv.Optional('types', cfgv.check_array(check_type_tag), ['file']),\n cfgv.Optional('types_or', cfgv.check_array(check_type_tag), ['file']),\n cfgv.Optional('exclude_types', cfgv.check_array(check_type_tag), []),\n\n cfgv.Optional(\n 'additional_dependencies', cfgv.check_array(cfgv.check_string), [],\n ),\n cfgv.Optional('args', cfgv.check_array(cfgv.check_string), []),\n cfgv.Optional('always_run', cfgv.check_bool, False),\n cfgv.Optional('pass_filenames', cfgv.check_bool, True),\n cfgv.Optional('description', cfgv.check_string, ''),\n cfgv.Optional('language_version', cfgv.check_string, C.DEFAULT),\n cfgv.Optional('log_file', cfgv.check_string, ''),\n cfgv.Optional('minimum_pre_commit_version', cfgv.check_string, '0'),\n cfgv.Optional('require_serial', cfgv.check_bool, False),\n cfgv.Optional('stages', cfgv.check_array(cfgv.check_one_of(C.STAGES)), []),\n cfgv.Optional('verbose', cfgv.check_bool, False),\n)\nMANIFEST_SCHEMA = cfgv.Array(MANIFEST_HOOK_DICT)\n\n\nclass InvalidManifestError(FatalError):\n pass\n\n\nload_manifest = functools.partial(\n cfgv.load_from_filename,\n schema=MANIFEST_SCHEMA,\n load_strategy=yaml_load,\n exc_tp=InvalidManifestError,\n)\n\n\ndef validate_manifest_main(argv: Optional[Sequence[str]] = None) -> int:\n parser = _make_argparser('Manifest filenames.')\n args = parser.parse_args(argv)\n\n with logging_handler(args.color):\n ret = 0\n for filename in args.filenames:\n try:\n load_manifest(filename)\n except InvalidManifestError as e:\n print(e)\n ret = 1\n return ret\n\n\nLOCAL = 'local'\nMETA = 'meta'\n\n\nclass OptionalSensibleRegex(cfgv.OptionalNoDefault):\n def check(self, dct: Dict[str, Any]) -> None:\n super().check(dct)\n\n if '/*' in dct.get(self.key, ''):\n logger.warning(\n f'The {self.key!r} field in hook {dct.get(\"id\")!r} is a '\n f\"regex, not a glob -- matching '/*' probably isn't what you \"\n f'want here',\n )\n\n\nclass MigrateShaToRev:\n key = 'rev'\n\n @staticmethod\n def _cond(key: str) -> cfgv.Conditional:\n return cfgv.Conditional(\n key, cfgv.check_string,\n condition_key='repo',\n condition_value=cfgv.NotIn(LOCAL, META),\n ensure_absent=True,\n )\n\n def check(self, dct: Dict[str, Any]) -> None:\n if dct.get('repo') in {LOCAL, META}:\n self._cond('rev').check(dct)\n self._cond('sha').check(dct)\n elif 'sha' in dct and 'rev' in dct:\n raise cfgv.ValidationError('Cannot specify both sha and rev')\n elif 'sha' in dct:\n self._cond('sha').check(dct)\n else:\n self._cond('rev').check(dct)\n\n def apply_default(self, dct: Dict[str, Any]) -> None:\n if 'sha' in dct:\n dct['rev'] = dct.pop('sha')\n\n remove_default = cfgv.Required.remove_default\n\n\ndef _entry(modname: str) -> str:\n \"\"\"the hook `entry` is passed through `shlex.split()` by the command\n runner, so to prevent issues with 
spaces and backslashes (on Windows)\n it must be quoted here.\n \"\"\"\n return f'{shlex.quote(sys.executable)} -m pre_commit.meta_hooks.{modname}'\n\n\ndef warn_unknown_keys_root(\n extra: Sequence[str],\n orig_keys: Sequence[str],\n dct: Dict[str, str],\n) -> None:\n logger.warning(f'Unexpected key(s) present at root: {\", \".join(extra)}')\n\n\ndef warn_unknown_keys_repo(\n extra: Sequence[str],\n orig_keys: Sequence[str],\n dct: Dict[str, str],\n) -> None:\n logger.warning(\n f'Unexpected key(s) present on {dct[\"repo\"]}: {\", \".join(extra)}',\n )\n\n\n_meta = (\n (\n 'check-hooks-apply', (\n ('name', 'Check hooks apply to the repository'),\n ('files', C.CONFIG_FILE),\n ('entry', _entry('check_hooks_apply')),\n ),\n ),\n (\n 'check-useless-excludes', (\n ('name', 'Check for useless excludes'),\n ('files', C.CONFIG_FILE),\n ('entry', _entry('check_useless_excludes')),\n ),\n ),\n (\n 'identity', (\n ('name', 'identity'),\n ('verbose', True),\n ('entry', _entry('identity')),\n ),\n ),\n)\n\nMETA_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n cfgv.Required('id', cfgv.check_string),\n cfgv.Required('id', cfgv.check_one_of(tuple(k for k, _ in _meta))),\n # language must be system\n cfgv.Optional('language', cfgv.check_one_of({'system'}), 'system'),\n *(\n # default to the hook definition for the meta hooks\n cfgv.ConditionalOptional(key, cfgv.check_any, value, 'id', hook_id)\n for hook_id, values in _meta\n for key, value in values\n ),\n *(\n # default to the \"manifest\" parsing\n cfgv.OptionalNoDefault(item.key, item.check_fn)\n # these will always be defaulted above\n if item.key in {'name', 'language', 'entry'} else\n item\n for item in MANIFEST_HOOK_DICT.items\n ),\n)\nCONFIG_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n\n cfgv.Required('id', cfgv.check_string),\n\n # All keys in manifest hook dict are valid in a config hook dict, but\n # are optional.\n # No defaults are provided here as the config is merged on top of the\n # manifest.\n *(\n cfgv.OptionalNoDefault(item.key, item.check_fn)\n for item in MANIFEST_HOOK_DICT.items\n if item.key != 'id'\n ),\n OptionalSensibleRegex('files', cfgv.check_string),\n OptionalSensibleRegex('exclude', cfgv.check_string),\n)\nCONFIG_REPO_DICT = cfgv.Map(\n 'Repository', 'repo',\n\n cfgv.Required('repo', cfgv.check_string),\n\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(CONFIG_HOOK_DICT),\n 'repo', cfgv.NotIn(LOCAL, META),\n ),\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(MANIFEST_HOOK_DICT),\n 'repo', LOCAL,\n ),\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(META_HOOK_DICT),\n 'repo', META,\n ),\n\n MigrateShaToRev(),\n cfgv.WarnAdditionalKeys(('repo', 'rev', 'hooks'), warn_unknown_keys_repo),\n)\nDEFAULT_LANGUAGE_VERSION = cfgv.Map(\n 'DefaultLanguageVersion', None,\n cfgv.NoAdditionalKeys(all_languages),\n *(cfgv.Optional(x, cfgv.check_string, C.DEFAULT) for x in all_languages),\n)\nCONFIG_SCHEMA = cfgv.Map(\n 'Config', None,\n\n cfgv.RequiredRecurse('repos', cfgv.Array(CONFIG_REPO_DICT)),\n cfgv.OptionalRecurse(\n 'default_language_version', DEFAULT_LANGUAGE_VERSION, {},\n ),\n cfgv.Optional(\n 'default_stages',\n cfgv.check_array(cfgv.check_one_of(C.STAGES)),\n C.STAGES,\n ),\n cfgv.Optional('files', check_string_regex, ''),\n cfgv.Optional('exclude', check_string_regex, '^$'),\n cfgv.Optional('fail_fast', cfgv.check_bool, False),\n cfgv.Optional(\n 'minimum_pre_commit_version',\n cfgv.check_and(cfgv.check_string, check_min_version),\n '0',\n ),\n cfgv.WarnAdditionalKeys(\n (\n 'repos',\n 'default_language_version',\n 'default_stages',\n 
'files',\n 'exclude',\n 'fail_fast',\n 'minimum_pre_commit_version',\n ),\n warn_unknown_keys_root,\n ),\n)\n\n\nclass InvalidConfigError(FatalError):\n pass\n\n\ndef ordered_load_normalize_legacy_config(contents: str) -> Dict[str, Any]:\n data = yaml_load(contents)\n if isinstance(data, list):\n logger.warning(\n 'normalizing pre-commit configuration to a top-level map. '\n 'support for top level list will be removed in a future version. '\n 'run: `pre-commit migrate-config` to automatically fix this.',\n )\n return {'repos': data}\n else:\n return data\n\n\nload_config = functools.partial(\n cfgv.load_from_filename,\n schema=CONFIG_SCHEMA,\n load_strategy=ordered_load_normalize_legacy_config,\n exc_tp=InvalidConfigError,\n)\n\n\ndef validate_config_main(argv: Optional[Sequence[str]] = None) -> int:\n parser = _make_argparser('Config filenames.')\n args = parser.parse_args(argv)\n\n with logging_handler(args.color):\n ret = 0\n for filename in args.filenames:\n try:\n load_config(filename)\n except InvalidConfigError as e:\n print(e)\n ret = 1\n return ret\n", "path": "pre_commit/clientlib.py"}]}
| 3,568 | 260 |
gh_patches_debug_29268
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2141
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleting generator in subscription clean up will not call a 'finally' statement
<!-- Provide a general summary of the bug in the title above. -->
<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->
<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->
## Describe the Bug
I have found an issue with the way subscriptions are cleaned up when using the graphql-transport-ws protocol.
My subscription returns an AsyncIterator which looks something like this:
```
async def get_value() -> AsyncIterator[int]:
    try:
        while True:
            # Call some other code to
            # get the value from another object
            yield ...
    finally:
        # Do some important cleanup here.
        ...
```
I have multiple subscriptions, which are being updated at 10Hz. When I cancel the subscriptions (for example, close the webpage calling subscribe), I get a `ConnectionResetError: "Cannot write to closing transport"` on the server side, presumably as the task has not yet been cancelled and is trying to send an updated value while the websocket is closing. Looking at the Strawberry code that handles this (subscriptions/protocols/graphql_transport_ws/handlers.py), I can see that this error is caught as a `BaseException` ([here](https://github.com/strawberry-graphql/strawberry/blob/fe9737b9d218f437500db215525a147de2ea1d64/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py#L218)) and so this task and generator are deleted from the internal dictionaries.
What I have found, however, is that when the generator is simply deleted in this cleanup, the 'finally' statement in my try-finally implementation does not get executed, and so I cannot perform any cleanup. I have seen this lead to a memory leak, which is only stopped when the garbage collector comes around and runs the 'finally' statement.
To ensure the generator runs to completion and executes the 'finally' statement, I wonder whether the generator should be explicitly closed first, before deleting it from the internal subscriptions list, much like is done in the cleanup_operation() method ([here](https://github.com/strawberry-graphql/strawberry/blob/fe9737b9d218f437500db215525a147de2ea1d64/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py#L282?)).
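
As a minimal self-contained illustration of the behaviour (hypothetical names, nothing here is taken from the Strawberry handlers module): awaiting `aclose()` on an async generator raises `GeneratorExit` inside it and runs the `finally` block immediately, whereas just dropping the last reference leaves that cleanup to the garbage collector.
```
import asyncio
from typing import AsyncIterator


async def get_value() -> AsyncIterator[int]:
    try:
        value = 0
        while True:
            value += 1
            yield value
    finally:
        # This is the cleanup that gets skipped when the generator
        # is merely deleted instead of being closed.
        print("finally ran")


async def main() -> None:
    gen = get_value()
    print(await gen.__anext__())  # start the generator (prints 1)
    await gen.aclose()            # 'finally ran' is printed here, immediately


asyncio.run(main())
```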
## System Information
- Operating system: Centos 7
- Strawberry version (if applicable): 0.121.1
- Python version: 3.8.12
## Additional Context
<!-- Add any other relevant information about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py`
Content:
```
1 import asyncio
2 from abc import ABC, abstractmethod
3 from contextlib import suppress
4 from datetime import timedelta
5 from typing import Any, AsyncGenerator, Callable, Dict, List, Optional
6
7 from graphql import (
8 ExecutionResult as GraphQLExecutionResult,
9 GraphQLError,
10 GraphQLSyntaxError,
11 parse,
12 )
13 from graphql.error.graphql_error import format_error as format_graphql_error
14
15 from strawberry.schema import BaseSchema
16 from strawberry.subscriptions.protocols.graphql_transport_ws.types import (
17 CompleteMessage,
18 ConnectionAckMessage,
19 ConnectionInitMessage,
20 ErrorMessage,
21 GraphQLTransportMessage,
22 NextMessage,
23 PingMessage,
24 PongMessage,
25 SubscribeMessage,
26 SubscribeMessagePayload,
27 )
28 from strawberry.types.graphql import OperationType
29 from strawberry.utils.debug import pretty_print_graphql_operation
30 from strawberry.utils.operation import get_operation_type
31
32
33 class BaseGraphQLTransportWSHandler(ABC):
34 def __init__(
35 self,
36 schema: BaseSchema,
37 debug: bool,
38 connection_init_wait_timeout: timedelta,
39 ):
40 self.schema = schema
41 self.debug = debug
42 self.connection_init_wait_timeout = connection_init_wait_timeout
43 self.connection_init_timeout_task: Optional[asyncio.Task] = None
44 self.connection_init_received = False
45 self.connection_acknowledged = False
46 self.subscriptions: Dict[str, AsyncGenerator] = {}
47 self.tasks: Dict[str, asyncio.Task] = {}
48 self.completed_tasks: List[asyncio.Task] = []
49
50 @abstractmethod
51 async def get_context(self) -> Any:
52 """Return the operations context"""
53
54 @abstractmethod
55 async def get_root_value(self) -> Any:
56 """Return the schemas root value"""
57
58 @abstractmethod
59 async def send_json(self, data: dict) -> None:
60 """Send the data JSON encoded to the WebSocket client"""
61
62 @abstractmethod
63 async def close(self, code: int, reason: str) -> None:
64 """Close the WebSocket with the passed code and reason"""
65
66 @abstractmethod
67 async def handle_request(self) -> Any:
68 """Handle the request this instance was created for"""
69
70 async def handle(self) -> Any:
71 timeout_handler = self.handle_connection_init_timeout()
72 self.connection_init_timeout_task = asyncio.create_task(timeout_handler)
73 return await self.handle_request()
74
75 async def handle_connection_init_timeout(self):
76 delay = self.connection_init_wait_timeout.total_seconds()
77 await asyncio.sleep(delay=delay)
78
79 if self.connection_init_received:
80 return
81
82 reason = "Connection initialisation timeout"
83 await self.close(code=4408, reason=reason)
84
85 async def handle_message(self, message: dict):
86 handler: Callable
87 handler_arg: Any
88 try:
89 message_type = message.pop("type")
90
91 if message_type == ConnectionInitMessage.type:
92 handler = self.handle_connection_init
93 handler_arg = ConnectionInitMessage(**message)
94
95 elif message_type == PingMessage.type:
96 handler = self.handle_ping
97 handler_arg = PingMessage(**message)
98
99 elif message_type == PongMessage.type:
100 handler = self.handle_pong
101 handler_arg = PongMessage(**message)
102
103 elif message_type == SubscribeMessage.type:
104 handler = self.handle_subscribe
105 payload = SubscribeMessagePayload(**message.pop("payload"))
106 handler_arg = SubscribeMessage(payload=payload, **message)
107
108 elif message_type == CompleteMessage.type:
109 handler = self.handle_complete
110 handler_arg = CompleteMessage(**message)
111
112 else:
113 handler = self.handle_invalid_message
114 handler_arg = f"Unknown message type: {message_type}"
115
116 except (KeyError, TypeError):
117 handler = self.handle_invalid_message
118 handler_arg = "Failed to parse message"
119
120 await handler(handler_arg)
121 await self.reap_completed_tasks()
122
123 async def handle_connection_init(self, message: ConnectionInitMessage) -> None:
124 if self.connection_init_received:
125 reason = "Too many initialisation requests"
126 await self.close(code=4429, reason=reason)
127 return
128
129 self.connection_init_received = True
130 await self.send_message(ConnectionAckMessage())
131 self.connection_acknowledged = True
132
133 async def handle_ping(self, message: PingMessage) -> None:
134 await self.send_message(PongMessage())
135
136 async def handle_pong(self, message: PongMessage) -> None:
137 pass
138
139 async def handle_subscribe(self, message: SubscribeMessage) -> None:
140 if not self.connection_acknowledged:
141 await self.close(code=4401, reason="Unauthorized")
142 return
143
144 try:
145 graphql_document = parse(message.payload.query)
146 except GraphQLSyntaxError as exc:
147 await self.close(code=4400, reason=exc.message)
148 return
149
150 try:
151 operation_type = get_operation_type(
152 graphql_document, message.payload.operationName
153 )
154 except RuntimeError:
155 await self.close(code=4400, reason="Can't get GraphQL operation type")
156 return
157
158 if message.id in self.subscriptions:
159 reason = f"Subscriber for {message.id} already exists"
160 await self.close(code=4409, reason=reason)
161 return
162
163 if self.debug: # pragma: no cover
164 pretty_print_graphql_operation(
165 message.payload.operationName,
166 message.payload.query,
167 message.payload.variables,
168 )
169
170 context = await self.get_context()
171 root_value = await self.get_root_value()
172
173 # Get an AsyncGenerator yielding the results
174 if operation_type == OperationType.SUBSCRIPTION:
175 result_source = await self.schema.subscribe(
176 query=message.payload.query,
177 variable_values=message.payload.variables,
178 operation_name=message.payload.operationName,
179 context_value=context,
180 root_value=root_value,
181 )
182 else:
183 # create AsyncGenerator returning a single result
184 async def get_result_source():
185 yield await self.schema.execute(
186 query=message.payload.query,
187 variable_values=message.payload.variables,
188 context_value=context,
189 root_value=root_value,
190 operation_name=message.payload.operationName,
191 )
192
193 result_source = get_result_source()
194
195 # Handle initial validation errors
196 if isinstance(result_source, GraphQLExecutionResult):
197 assert result_source.errors
198 payload = [format_graphql_error(result_source.errors[0])]
199 await self.send_message(ErrorMessage(id=message.id, payload=payload))
200 self.schema.process_errors(result_source.errors)
201 return
202
203 # Create task to handle this subscription, reserve the operation ID
204 self.subscriptions[message.id] = result_source
205 self.tasks[message.id] = asyncio.create_task(
206 self.operation_task(result_source, message.id)
207 )
208
209 async def operation_task(
210 self, result_source: AsyncGenerator, operation_id: str
211 ) -> None:
212 """
213 Operation task top level method. Cleans up and de-registers the operation
214 once it is done.
215 """
216 try:
217 await self.handle_async_results(result_source, operation_id)
218 except BaseException: # pragma: no cover
219 # cleanup in case of something really unexpected
220 del self.subscriptions[operation_id]
221 del self.tasks[operation_id]
222 raise
223 else:
224 # de-register the operation _before_ sending the `Complete` message
225 # to make the `operation_id` immediately available for re-use
226 del self.subscriptions[operation_id]
227 del self.tasks[operation_id]
228 await self.send_message(CompleteMessage(id=operation_id))
229 finally:
230 # add this task to a list to be reaped later
231 task = asyncio.current_task()
232 assert task is not None
233 self.completed_tasks.append(task)
234
235 async def handle_async_results(
236 self,
237 result_source: AsyncGenerator,
238 operation_id: str,
239 ) -> None:
240 try:
241 async for result in result_source:
242 if result.errors:
243 error_payload = [format_graphql_error(err) for err in result.errors]
244 error_message = ErrorMessage(id=operation_id, payload=error_payload)
245 await self.send_message(error_message)
246 self.schema.process_errors(result.errors)
247 return
248 else:
249 next_payload = {"data": result.data}
250 next_message = NextMessage(id=operation_id, payload=next_payload)
251 await self.send_message(next_message)
252 except asyncio.CancelledError:
253 # CancelledErrors are expected during task cleanup.
254 return
255 except Exception as error:
256 # GraphQLErrors are handled by graphql-core and included in the
257 # ExecutionResult
258 error = GraphQLError(str(error), original_error=error)
259 error_payload = [format_graphql_error(error)]
260 error_message = ErrorMessage(id=operation_id, payload=error_payload)
261 await self.send_message(error_message)
262 self.schema.process_errors([error])
263 return
264
265 async def handle_complete(self, message: CompleteMessage) -> None:
266 await self.cleanup_operation(operation_id=message.id)
267
268 async def handle_invalid_message(self, error_message: str) -> None:
269 await self.close(code=4400, reason=error_message)
270
271 async def send_message(self, message: GraphQLTransportMessage) -> None:
272 data = message.as_dict()
273 await self.send_json(data)
274
275 async def cleanup_operation(self, operation_id: str) -> None:
276 if operation_id not in self.subscriptions:
277 return
278 generator = self.subscriptions.pop(operation_id)
279 task = self.tasks.pop(operation_id)
280 # since python 3.8, generators cannot be reliably closed
281 with suppress(RuntimeError):
282 await generator.aclose()
283 task.cancel()
284 with suppress(BaseException):
285 await task
286
287 async def reap_completed_tasks(self) -> None:
288 """
289 Await tasks that have completed
290 """
291 tasks, self.completed_tasks = self.completed_tasks, []
292 for task in tasks:
293 with suppress(BaseException):
294 await task
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py b/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py
--- a/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py
+++ b/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py
@@ -217,6 +217,11 @@
await self.handle_async_results(result_source, operation_id)
except BaseException: # pragma: no cover
# cleanup in case of something really unexpected
+ # wait for generator to be closed to ensure that any existing
+ # 'finally' statement is called
+ result_source = self.subscriptions[operation_id]
+ with suppress(RuntimeError):
+ await result_source.aclose()
del self.subscriptions[operation_id]
del self.tasks[operation_id]
raise
@@ -275,14 +280,14 @@
async def cleanup_operation(self, operation_id: str) -> None:
if operation_id not in self.subscriptions:
return
- generator = self.subscriptions.pop(operation_id)
+ result_source = self.subscriptions.pop(operation_id)
task = self.tasks.pop(operation_id)
- # since python 3.8, generators cannot be reliably closed
- with suppress(RuntimeError):
- await generator.aclose()
task.cancel()
with suppress(BaseException):
await task
+ # since python 3.8, generators cannot be reliably closed
+ with suppress(RuntimeError):
+ await result_source.aclose()
async def reap_completed_tasks(self) -> None:
"""
|
{"golden_diff": "diff --git a/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py b/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py\n--- a/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py\n+++ b/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py\n@@ -217,6 +217,11 @@\n await self.handle_async_results(result_source, operation_id)\n except BaseException: # pragma: no cover\n # cleanup in case of something really unexpected\n+ # wait for generator to be closed to ensure that any existing\n+ # 'finally' statement is called\n+ result_source = self.subscriptions[operation_id]\n+ with suppress(RuntimeError):\n+ await result_source.aclose()\n del self.subscriptions[operation_id]\n del self.tasks[operation_id]\n raise\n@@ -275,14 +280,14 @@\n async def cleanup_operation(self, operation_id: str) -> None:\n if operation_id not in self.subscriptions:\n return\n- generator = self.subscriptions.pop(operation_id)\n+ result_source = self.subscriptions.pop(operation_id)\n task = self.tasks.pop(operation_id)\n- # since python 3.8, generators cannot be reliably closed\n- with suppress(RuntimeError):\n- await generator.aclose()\n task.cancel()\n with suppress(BaseException):\n await task\n+ # since python 3.8, generators cannot be reliably closed\n+ with suppress(RuntimeError):\n+ await result_source.aclose()\n \n async def reap_completed_tasks(self) -> None:\n \"\"\"\n", "issue": "Deleting generator in subscription clean up will not call a 'finally' statement\n<!-- Provide a general summary of the bug in the title above. -->\r\n\r\n<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->\r\n<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->\r\n\r\n## Describe the Bug\r\nI have found an issue with the way subscriptions are cleaned up when using the graphql-transport-ws protocol. \r\n\r\nMy subscription returns a AsyncIterator which look something like this:\r\n```\r\nasync def get_value() -> AsyncIterator[int]:\r\n try:\r\n while True:\r\n # Call some other code to \r\n # get the value from another object\r\n finally:\r\n # Do some important cleanup here.\r\n```\r\nI have multiple subscriptions, which are being updated at 10Hz. When I cancel the subscriptions (for example, close the webpage calling subscribe), I get a `ConnectionResetError: \"Cannot write to closing transport\"` on the server side, presumably as the task has not yet been cancelled and is trying to send an updated value while the websocket is closing. Looking at the Strawberry code that handles this (subscriptions/protocols/graphql_transport_ws/handlers.py), I can see that this error is caught as a `BaseException` ([here](https://github.com/strawberry-graphql/strawberry/blob/fe9737b9d218f437500db215525a147de2ea1d64/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py#L218)) and so this task and generator are deleted from the internal dictionaries. \r\n\r\nWhat I have found however, is that when the generator is simply deleted in this clean up, the 'finally' statement in my try-finally implementation does not get executed and so I cannot perform any clean up. I have seen this lead to a memory leak, which is only stopped when the garbage collector comes round and runs the 'finally' statement. 
\r\n\r\nTo ensure the generator runs to completion and executes the 'finally' statement, I wonder whether the generator should be explicitly closed first, before deleting it from the internal subscriptions list, much like is done in the cleanup_operation() method ([here](https://github.com/strawberry-graphql/strawberry/blob/fe9737b9d218f437500db215525a147de2ea1d64/strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py#L282?)).\r\n\r\n## System Information\r\n\r\n - Operating system: Centos 7\r\n - Strawberry version (if applicable): 0.121.1\r\n - Python version: 3.8.12\r\n\r\n## Additional Context\r\n\r\n<!-- Add any other relevant information about the problem here. -->\r\n\n", "before_files": [{"content": "import asyncio\nfrom abc import ABC, abstractmethod\nfrom contextlib import suppress\nfrom datetime import timedelta\nfrom typing import Any, AsyncGenerator, Callable, Dict, List, Optional\n\nfrom graphql import (\n ExecutionResult as GraphQLExecutionResult,\n GraphQLError,\n GraphQLSyntaxError,\n parse,\n)\nfrom graphql.error.graphql_error import format_error as format_graphql_error\n\nfrom strawberry.schema import BaseSchema\nfrom strawberry.subscriptions.protocols.graphql_transport_ws.types import (\n CompleteMessage,\n ConnectionAckMessage,\n ConnectionInitMessage,\n ErrorMessage,\n GraphQLTransportMessage,\n NextMessage,\n PingMessage,\n PongMessage,\n SubscribeMessage,\n SubscribeMessagePayload,\n)\nfrom strawberry.types.graphql import OperationType\nfrom strawberry.utils.debug import pretty_print_graphql_operation\nfrom strawberry.utils.operation import get_operation_type\n\n\nclass BaseGraphQLTransportWSHandler(ABC):\n def __init__(\n self,\n schema: BaseSchema,\n debug: bool,\n connection_init_wait_timeout: timedelta,\n ):\n self.schema = schema\n self.debug = debug\n self.connection_init_wait_timeout = connection_init_wait_timeout\n self.connection_init_timeout_task: Optional[asyncio.Task] = None\n self.connection_init_received = False\n self.connection_acknowledged = False\n self.subscriptions: Dict[str, AsyncGenerator] = {}\n self.tasks: Dict[str, asyncio.Task] = {}\n self.completed_tasks: List[asyncio.Task] = []\n\n @abstractmethod\n async def get_context(self) -> Any:\n \"\"\"Return the operations context\"\"\"\n\n @abstractmethod\n async def get_root_value(self) -> Any:\n \"\"\"Return the schemas root value\"\"\"\n\n @abstractmethod\n async def send_json(self, data: dict) -> None:\n \"\"\"Send the data JSON encoded to the WebSocket client\"\"\"\n\n @abstractmethod\n async def close(self, code: int, reason: str) -> None:\n \"\"\"Close the WebSocket with the passed code and reason\"\"\"\n\n @abstractmethod\n async def handle_request(self) -> Any:\n \"\"\"Handle the request this instance was created for\"\"\"\n\n async def handle(self) -> Any:\n timeout_handler = self.handle_connection_init_timeout()\n self.connection_init_timeout_task = asyncio.create_task(timeout_handler)\n return await self.handle_request()\n\n async def handle_connection_init_timeout(self):\n delay = self.connection_init_wait_timeout.total_seconds()\n await asyncio.sleep(delay=delay)\n\n if self.connection_init_received:\n return\n\n reason = \"Connection initialisation timeout\"\n await self.close(code=4408, reason=reason)\n\n async def handle_message(self, message: dict):\n handler: Callable\n handler_arg: Any\n try:\n message_type = message.pop(\"type\")\n\n if message_type == ConnectionInitMessage.type:\n handler = self.handle_connection_init\n handler_arg = 
ConnectionInitMessage(**message)\n\n elif message_type == PingMessage.type:\n handler = self.handle_ping\n handler_arg = PingMessage(**message)\n\n elif message_type == PongMessage.type:\n handler = self.handle_pong\n handler_arg = PongMessage(**message)\n\n elif message_type == SubscribeMessage.type:\n handler = self.handle_subscribe\n payload = SubscribeMessagePayload(**message.pop(\"payload\"))\n handler_arg = SubscribeMessage(payload=payload, **message)\n\n elif message_type == CompleteMessage.type:\n handler = self.handle_complete\n handler_arg = CompleteMessage(**message)\n\n else:\n handler = self.handle_invalid_message\n handler_arg = f\"Unknown message type: {message_type}\"\n\n except (KeyError, TypeError):\n handler = self.handle_invalid_message\n handler_arg = \"Failed to parse message\"\n\n await handler(handler_arg)\n await self.reap_completed_tasks()\n\n async def handle_connection_init(self, message: ConnectionInitMessage) -> None:\n if self.connection_init_received:\n reason = \"Too many initialisation requests\"\n await self.close(code=4429, reason=reason)\n return\n\n self.connection_init_received = True\n await self.send_message(ConnectionAckMessage())\n self.connection_acknowledged = True\n\n async def handle_ping(self, message: PingMessage) -> None:\n await self.send_message(PongMessage())\n\n async def handle_pong(self, message: PongMessage) -> None:\n pass\n\n async def handle_subscribe(self, message: SubscribeMessage) -> None:\n if not self.connection_acknowledged:\n await self.close(code=4401, reason=\"Unauthorized\")\n return\n\n try:\n graphql_document = parse(message.payload.query)\n except GraphQLSyntaxError as exc:\n await self.close(code=4400, reason=exc.message)\n return\n\n try:\n operation_type = get_operation_type(\n graphql_document, message.payload.operationName\n )\n except RuntimeError:\n await self.close(code=4400, reason=\"Can't get GraphQL operation type\")\n return\n\n if message.id in self.subscriptions:\n reason = f\"Subscriber for {message.id} already exists\"\n await self.close(code=4409, reason=reason)\n return\n\n if self.debug: # pragma: no cover\n pretty_print_graphql_operation(\n message.payload.operationName,\n message.payload.query,\n message.payload.variables,\n )\n\n context = await self.get_context()\n root_value = await self.get_root_value()\n\n # Get an AsyncGenerator yielding the results\n if operation_type == OperationType.SUBSCRIPTION:\n result_source = await self.schema.subscribe(\n query=message.payload.query,\n variable_values=message.payload.variables,\n operation_name=message.payload.operationName,\n context_value=context,\n root_value=root_value,\n )\n else:\n # create AsyncGenerator returning a single result\n async def get_result_source():\n yield await self.schema.execute(\n query=message.payload.query,\n variable_values=message.payload.variables,\n context_value=context,\n root_value=root_value,\n operation_name=message.payload.operationName,\n )\n\n result_source = get_result_source()\n\n # Handle initial validation errors\n if isinstance(result_source, GraphQLExecutionResult):\n assert result_source.errors\n payload = [format_graphql_error(result_source.errors[0])]\n await self.send_message(ErrorMessage(id=message.id, payload=payload))\n self.schema.process_errors(result_source.errors)\n return\n\n # Create task to handle this subscription, reserve the operation ID\n self.subscriptions[message.id] = result_source\n self.tasks[message.id] = asyncio.create_task(\n self.operation_task(result_source, message.id)\n )\n\n 
async def operation_task(\n self, result_source: AsyncGenerator, operation_id: str\n ) -> None:\n \"\"\"\n Operation task top level method. Cleans up and de-registers the operation\n once it is done.\n \"\"\"\n try:\n await self.handle_async_results(result_source, operation_id)\n except BaseException: # pragma: no cover\n # cleanup in case of something really unexpected\n del self.subscriptions[operation_id]\n del self.tasks[operation_id]\n raise\n else:\n # de-register the operation _before_ sending the `Complete` message\n # to make the `operation_id` immediately available for re-use\n del self.subscriptions[operation_id]\n del self.tasks[operation_id]\n await self.send_message(CompleteMessage(id=operation_id))\n finally:\n # add this task to a list to be reaped later\n task = asyncio.current_task()\n assert task is not None\n self.completed_tasks.append(task)\n\n async def handle_async_results(\n self,\n result_source: AsyncGenerator,\n operation_id: str,\n ) -> None:\n try:\n async for result in result_source:\n if result.errors:\n error_payload = [format_graphql_error(err) for err in result.errors]\n error_message = ErrorMessage(id=operation_id, payload=error_payload)\n await self.send_message(error_message)\n self.schema.process_errors(result.errors)\n return\n else:\n next_payload = {\"data\": result.data}\n next_message = NextMessage(id=operation_id, payload=next_payload)\n await self.send_message(next_message)\n except asyncio.CancelledError:\n # CancelledErrors are expected during task cleanup.\n return\n except Exception as error:\n # GraphQLErrors are handled by graphql-core and included in the\n # ExecutionResult\n error = GraphQLError(str(error), original_error=error)\n error_payload = [format_graphql_error(error)]\n error_message = ErrorMessage(id=operation_id, payload=error_payload)\n await self.send_message(error_message)\n self.schema.process_errors([error])\n return\n\n async def handle_complete(self, message: CompleteMessage) -> None:\n await self.cleanup_operation(operation_id=message.id)\n\n async def handle_invalid_message(self, error_message: str) -> None:\n await self.close(code=4400, reason=error_message)\n\n async def send_message(self, message: GraphQLTransportMessage) -> None:\n data = message.as_dict()\n await self.send_json(data)\n\n async def cleanup_operation(self, operation_id: str) -> None:\n if operation_id not in self.subscriptions:\n return\n generator = self.subscriptions.pop(operation_id)\n task = self.tasks.pop(operation_id)\n # since python 3.8, generators cannot be reliably closed\n with suppress(RuntimeError):\n await generator.aclose()\n task.cancel()\n with suppress(BaseException):\n await task\n\n async def reap_completed_tasks(self) -> None:\n \"\"\"\n Await tasks that have completed\n \"\"\"\n tasks, self.completed_tasks = self.completed_tasks, []\n for task in tasks:\n with suppress(BaseException):\n await task\n", "path": "strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py"}], "after_files": [{"content": "import asyncio\nfrom abc import ABC, abstractmethod\nfrom contextlib import suppress\nfrom datetime import timedelta\nfrom typing import Any, AsyncGenerator, Callable, Dict, List, Optional\n\nfrom graphql import (\n ExecutionResult as GraphQLExecutionResult,\n GraphQLError,\n GraphQLSyntaxError,\n parse,\n)\nfrom graphql.error.graphql_error import format_error as format_graphql_error\n\nfrom strawberry.schema import BaseSchema\nfrom strawberry.subscriptions.protocols.graphql_transport_ws.types import (\n 
CompleteMessage,\n ConnectionAckMessage,\n ConnectionInitMessage,\n ErrorMessage,\n GraphQLTransportMessage,\n NextMessage,\n PingMessage,\n PongMessage,\n SubscribeMessage,\n SubscribeMessagePayload,\n)\nfrom strawberry.types.graphql import OperationType\nfrom strawberry.utils.debug import pretty_print_graphql_operation\nfrom strawberry.utils.operation import get_operation_type\n\n\nclass BaseGraphQLTransportWSHandler(ABC):\n def __init__(\n self,\n schema: BaseSchema,\n debug: bool,\n connection_init_wait_timeout: timedelta,\n ):\n self.schema = schema\n self.debug = debug\n self.connection_init_wait_timeout = connection_init_wait_timeout\n self.connection_init_timeout_task: Optional[asyncio.Task] = None\n self.connection_init_received = False\n self.connection_acknowledged = False\n self.subscriptions: Dict[str, AsyncGenerator] = {}\n self.tasks: Dict[str, asyncio.Task] = {}\n self.completed_tasks: List[asyncio.Task] = []\n\n @abstractmethod\n async def get_context(self) -> Any:\n \"\"\"Return the operations context\"\"\"\n\n @abstractmethod\n async def get_root_value(self) -> Any:\n \"\"\"Return the schemas root value\"\"\"\n\n @abstractmethod\n async def send_json(self, data: dict) -> None:\n \"\"\"Send the data JSON encoded to the WebSocket client\"\"\"\n\n @abstractmethod\n async def close(self, code: int, reason: str) -> None:\n \"\"\"Close the WebSocket with the passed code and reason\"\"\"\n\n @abstractmethod\n async def handle_request(self) -> Any:\n \"\"\"Handle the request this instance was created for\"\"\"\n\n async def handle(self) -> Any:\n timeout_handler = self.handle_connection_init_timeout()\n self.connection_init_timeout_task = asyncio.create_task(timeout_handler)\n return await self.handle_request()\n\n async def handle_connection_init_timeout(self):\n delay = self.connection_init_wait_timeout.total_seconds()\n await asyncio.sleep(delay=delay)\n\n if self.connection_init_received:\n return\n\n reason = \"Connection initialisation timeout\"\n await self.close(code=4408, reason=reason)\n\n async def handle_message(self, message: dict):\n handler: Callable\n handler_arg: Any\n try:\n message_type = message.pop(\"type\")\n\n if message_type == ConnectionInitMessage.type:\n handler = self.handle_connection_init\n handler_arg = ConnectionInitMessage(**message)\n\n elif message_type == PingMessage.type:\n handler = self.handle_ping\n handler_arg = PingMessage(**message)\n\n elif message_type == PongMessage.type:\n handler = self.handle_pong\n handler_arg = PongMessage(**message)\n\n elif message_type == SubscribeMessage.type:\n handler = self.handle_subscribe\n payload = SubscribeMessagePayload(**message.pop(\"payload\"))\n handler_arg = SubscribeMessage(payload=payload, **message)\n\n elif message_type == CompleteMessage.type:\n handler = self.handle_complete\n handler_arg = CompleteMessage(**message)\n\n else:\n handler = self.handle_invalid_message\n handler_arg = f\"Unknown message type: {message_type}\"\n\n except (KeyError, TypeError):\n handler = self.handle_invalid_message\n handler_arg = \"Failed to parse message\"\n\n await handler(handler_arg)\n await self.reap_completed_tasks()\n\n async def handle_connection_init(self, message: ConnectionInitMessage) -> None:\n if self.connection_init_received:\n reason = \"Too many initialisation requests\"\n await self.close(code=4429, reason=reason)\n return\n\n self.connection_init_received = True\n await self.send_message(ConnectionAckMessage())\n self.connection_acknowledged = True\n\n async def handle_ping(self, message: 
PingMessage) -> None:\n await self.send_message(PongMessage())\n\n async def handle_pong(self, message: PongMessage) -> None:\n pass\n\n async def handle_subscribe(self, message: SubscribeMessage) -> None:\n if not self.connection_acknowledged:\n await self.close(code=4401, reason=\"Unauthorized\")\n return\n\n try:\n graphql_document = parse(message.payload.query)\n except GraphQLSyntaxError as exc:\n await self.close(code=4400, reason=exc.message)\n return\n\n try:\n operation_type = get_operation_type(\n graphql_document, message.payload.operationName\n )\n except RuntimeError:\n await self.close(code=4400, reason=\"Can't get GraphQL operation type\")\n return\n\n if message.id in self.subscriptions:\n reason = f\"Subscriber for {message.id} already exists\"\n await self.close(code=4409, reason=reason)\n return\n\n if self.debug: # pragma: no cover\n pretty_print_graphql_operation(\n message.payload.operationName,\n message.payload.query,\n message.payload.variables,\n )\n\n context = await self.get_context()\n root_value = await self.get_root_value()\n\n # Get an AsyncGenerator yielding the results\n if operation_type == OperationType.SUBSCRIPTION:\n result_source = await self.schema.subscribe(\n query=message.payload.query,\n variable_values=message.payload.variables,\n operation_name=message.payload.operationName,\n context_value=context,\n root_value=root_value,\n )\n else:\n # create AsyncGenerator returning a single result\n async def get_result_source():\n yield await self.schema.execute(\n query=message.payload.query,\n variable_values=message.payload.variables,\n context_value=context,\n root_value=root_value,\n operation_name=message.payload.operationName,\n )\n\n result_source = get_result_source()\n\n # Handle initial validation errors\n if isinstance(result_source, GraphQLExecutionResult):\n assert result_source.errors\n payload = [format_graphql_error(result_source.errors[0])]\n await self.send_message(ErrorMessage(id=message.id, payload=payload))\n self.schema.process_errors(result_source.errors)\n return\n\n # Create task to handle this subscription, reserve the operation ID\n self.subscriptions[message.id] = result_source\n self.tasks[message.id] = asyncio.create_task(\n self.operation_task(result_source, message.id)\n )\n\n async def operation_task(\n self, result_source: AsyncGenerator, operation_id: str\n ) -> None:\n \"\"\"\n Operation task top level method. 
Cleans up and de-registers the operation\n once it is done.\n \"\"\"\n try:\n await self.handle_async_results(result_source, operation_id)\n except BaseException: # pragma: no cover\n # cleanup in case of something really unexpected\n # wait for generator to be closed to ensure that any existing\n # 'finally' statement is called\n result_source = self.subscriptions[operation_id]\n with suppress(RuntimeError):\n await result_source.aclose()\n del self.subscriptions[operation_id]\n del self.tasks[operation_id]\n raise\n else:\n # de-register the operation _before_ sending the `Complete` message\n # to make the `operation_id` immediately available for re-use\n del self.subscriptions[operation_id]\n del self.tasks[operation_id]\n await self.send_message(CompleteMessage(id=operation_id))\n finally:\n # add this task to a list to be reaped later\n task = asyncio.current_task()\n assert task is not None\n self.completed_tasks.append(task)\n\n async def handle_async_results(\n self,\n result_source: AsyncGenerator,\n operation_id: str,\n ) -> None:\n try:\n async for result in result_source:\n if result.errors:\n error_payload = [format_graphql_error(err) for err in result.errors]\n error_message = ErrorMessage(id=operation_id, payload=error_payload)\n await self.send_message(error_message)\n self.schema.process_errors(result.errors)\n return\n else:\n next_payload = {\"data\": result.data}\n next_message = NextMessage(id=operation_id, payload=next_payload)\n await self.send_message(next_message)\n except asyncio.CancelledError:\n # CancelledErrors are expected during task cleanup.\n return\n except Exception as error:\n # GraphQLErrors are handled by graphql-core and included in the\n # ExecutionResult\n error = GraphQLError(str(error), original_error=error)\n error_payload = [format_graphql_error(error)]\n error_message = ErrorMessage(id=operation_id, payload=error_payload)\n await self.send_message(error_message)\n self.schema.process_errors([error])\n return\n\n async def handle_complete(self, message: CompleteMessage) -> None:\n await self.cleanup_operation(operation_id=message.id)\n\n async def handle_invalid_message(self, error_message: str) -> None:\n await self.close(code=4400, reason=error_message)\n\n async def send_message(self, message: GraphQLTransportMessage) -> None:\n data = message.as_dict()\n await self.send_json(data)\n\n async def cleanup_operation(self, operation_id: str) -> None:\n if operation_id not in self.subscriptions:\n return\n result_source = self.subscriptions.pop(operation_id)\n task = self.tasks.pop(operation_id)\n task.cancel()\n with suppress(BaseException):\n await task\n # since python 3.8, generators cannot be reliably closed\n with suppress(RuntimeError):\n await result_source.aclose()\n\n async def reap_completed_tasks(self) -> None:\n \"\"\"\n Await tasks that have completed\n \"\"\"\n tasks, self.completed_tasks = self.completed_tasks, []\n for task in tasks:\n with suppress(BaseException):\n await task\n", "path": "strawberry/subscriptions/protocols/graphql_transport_ws/handlers.py"}]}
| 3,746 | 353 |
gh_patches_debug_11326
|
rasdani/github-patches
|
git_diff
|
tobymao__sqlglot-2443
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Erroneous handling of redshift's JSON_PARSE
**sqlglot version: 18.16.1**
**Fully reproducible code snippet**
```python
import sqlglot
sql = "SELECT JSON_PARSE('[10001,10002,\"abc\"]');"
parsed = sqlglot.parse_one(sql,dialect="redshift")
parsed.sql(dialect="redshift")
#'SELECT PARSE_JSON(\'[10001,10002,"abc"]\')'
```
The generated sql triggers an error when executed in redshift
>Failed to execute query: ERROR: function parse_json("unknown") does not exist
Hint: No function matches the given name and argument types. You may need to add explicit type casts.
**Official Documentation**
https://docs.aws.amazon.com/redshift/latest/dg/JSON_PARSE.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/redshift.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, transforms
6 from sqlglot.dialects.dialect import (
7 concat_to_dpipe_sql,
8 concat_ws_to_dpipe_sql,
9 generatedasidentitycolumnconstraint_sql,
10 rename_func,
11 ts_or_ds_to_date_sql,
12 )
13 from sqlglot.dialects.postgres import Postgres
14 from sqlglot.helper import seq_get
15 from sqlglot.tokens import TokenType
16
17
18 def _json_sql(self: Redshift.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:
19 return f'{self.sql(expression, "this")}."{expression.expression.name}"'
20
21
22 def _parse_date_add(args: t.List) -> exp.DateAdd:
23 return exp.DateAdd(
24 this=exp.TsOrDsToDate(this=seq_get(args, 2)),
25 expression=seq_get(args, 1),
26 unit=seq_get(args, 0),
27 )
28
29
30 class Redshift(Postgres):
31 # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
32 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
33
34 SUPPORTS_USER_DEFINED_TYPES = False
35 INDEX_OFFSET = 0
36
37 TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
38 TIME_MAPPING = {
39 **Postgres.TIME_MAPPING,
40 "MON": "%b",
41 "HH": "%H",
42 }
43
44 class Parser(Postgres.Parser):
45 FUNCTIONS = {
46 **Postgres.Parser.FUNCTIONS,
47 "ADD_MONTHS": lambda args: exp.DateAdd(
48 this=exp.TsOrDsToDate(this=seq_get(args, 0)),
49 expression=seq_get(args, 1),
50 unit=exp.var("month"),
51 ),
52 "DATEADD": _parse_date_add,
53 "DATE_ADD": _parse_date_add,
54 "DATEDIFF": lambda args: exp.DateDiff(
55 this=exp.TsOrDsToDate(this=seq_get(args, 2)),
56 expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
57 unit=seq_get(args, 0),
58 ),
59 "STRTOL": exp.FromBase.from_arg_list,
60 }
61
62 NO_PAREN_FUNCTION_PARSERS = {
63 **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
64 "APPROXIMATE": lambda self: self._parse_approximate_count(),
65 }
66
67 def _parse_table(
68 self,
69 schema: bool = False,
70 joins: bool = False,
71 alias_tokens: t.Optional[t.Collection[TokenType]] = None,
72 parse_bracket: bool = False,
73 ) -> t.Optional[exp.Expression]:
74 # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
75 unpivot = self._match(TokenType.UNPIVOT)
76 table = super()._parse_table(
77 schema=schema,
78 joins=joins,
79 alias_tokens=alias_tokens,
80 parse_bracket=parse_bracket,
81 )
82
83 return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table
84
85 def _parse_types(
86 self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
87 ) -> t.Optional[exp.Expression]:
88 this = super()._parse_types(
89 check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
90 )
91
92 if (
93 isinstance(this, exp.DataType)
94 and this.is_type("varchar")
95 and this.expressions
96 and this.expressions[0].this == exp.column("MAX")
97 ):
98 this.set("expressions", [exp.var("MAX")])
99
100 return this
101
102 def _parse_convert(
103 self, strict: bool, safe: t.Optional[bool] = None
104 ) -> t.Optional[exp.Expression]:
105 to = self._parse_types()
106 self._match(TokenType.COMMA)
107 this = self._parse_bitwise()
108 return self.expression(exp.TryCast, this=this, to=to, safe=safe)
109
110 def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
111 index = self._index - 1
112 func = self._parse_function()
113
114 if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
115 return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
116 self._retreat(index)
117 return None
118
119 class Tokenizer(Postgres.Tokenizer):
120 BIT_STRINGS = []
121 HEX_STRINGS = []
122 STRING_ESCAPES = ["\\", "'"]
123
124 KEYWORDS = {
125 **Postgres.Tokenizer.KEYWORDS,
126 "HLLSKETCH": TokenType.HLLSKETCH,
127 "SUPER": TokenType.SUPER,
128 "SYSDATE": TokenType.CURRENT_TIMESTAMP,
129 "TOP": TokenType.TOP,
130 "UNLOAD": TokenType.COMMAND,
131 "VARBYTE": TokenType.VARBINARY,
132 }
133
134 # Redshift allows # to appear as a table identifier prefix
135 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
136 SINGLE_TOKENS.pop("#")
137
138 class Generator(Postgres.Generator):
139 LOCKING_READS_SUPPORTED = False
140 RENAME_TABLE_WITH_DB = False
141 QUERY_HINTS = False
142 VALUES_AS_TABLE = False
143 TZ_TO_WITH_TIME_ZONE = True
144 NVL2_SUPPORTED = True
145
146 TYPE_MAPPING = {
147 **Postgres.Generator.TYPE_MAPPING,
148 exp.DataType.Type.BINARY: "VARBYTE",
149 exp.DataType.Type.INT: "INTEGER",
150 exp.DataType.Type.TIMETZ: "TIME",
151 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
152 exp.DataType.Type.VARBINARY: "VARBYTE",
153 }
154
155 PROPERTIES_LOCATION = {
156 **Postgres.Generator.PROPERTIES_LOCATION,
157 exp.LikeProperty: exp.Properties.Location.POST_WITH,
158 }
159
160 TRANSFORMS = {
161 **Postgres.Generator.TRANSFORMS,
162 exp.Concat: concat_to_dpipe_sql,
163 exp.ConcatWs: concat_ws_to_dpipe_sql,
164 exp.ApproxDistinct: lambda self, e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
165 exp.CurrentTimestamp: lambda self, e: "SYSDATE",
166 exp.DateAdd: lambda self, e: self.func(
167 "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
168 ),
169 exp.DateDiff: lambda self, e: self.func(
170 "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
171 ),
172 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
173 exp.DistStyleProperty: lambda self, e: self.naked_property(e),
174 exp.FromBase: rename_func("STRTOL"),
175 exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
176 exp.JSONExtract: _json_sql,
177 exp.JSONExtractScalar: _json_sql,
178 exp.SafeConcat: concat_to_dpipe_sql,
179 exp.Select: transforms.preprocess(
180 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
181 ),
182 exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
183 exp.TsOrDsToDate: ts_or_ds_to_date_sql("redshift"),
184 }
185
186 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
187 TRANSFORMS.pop(exp.Pivot)
188
189 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
190 TRANSFORMS.pop(exp.Pow)
191
192 # Redshift supports ANY_VALUE(..)
193 TRANSFORMS.pop(exp.AnyValue)
194
195 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
196
197 def with_properties(self, properties: exp.Properties) -> str:
198 """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
199 return self.properties(properties, prefix=" ", suffix="")
200
201 def datatype_sql(self, expression: exp.DataType) -> str:
202 """
203 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
204 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
205 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
206 `TEXT` to `VARCHAR`.
207 """
208 if expression.is_type("text"):
209 expression = expression.copy()
210 expression.set("this", exp.DataType.Type.VARCHAR)
211 precision = expression.args.get("expressions")
212
213 if not precision:
214 expression.append("expressions", exp.var("MAX"))
215
216 return super().datatype_sql(expression)
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -175,6 +175,7 @@
exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
exp.JSONExtract: _json_sql,
exp.JSONExtractScalar: _json_sql,
+ exp.ParseJSON: rename_func("JSON_PARSE"),
exp.SafeConcat: concat_to_dpipe_sql,
exp.Select: transforms.preprocess(
[transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
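As a quick sanity check of the new mapping (assuming a local sqlglot checkout with the patch above applied), the query from the report should now round-trip with its original function name:

```python
# Round-trip the query from the issue; with exp.ParseJSON rendered as JSON_PARSE
# for Redshift, the function name should survive parse + generate.
import sqlglot

sql = "SELECT JSON_PARSE('[10001,10002,\"abc\"]')"
out = sqlglot.parse_one(sql, dialect="redshift").sql(dialect="redshift")
print(out)  # expected to contain JSON_PARSE rather than PARSE_JSON once patched
```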
|
{"golden_diff": "diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py\n--- a/sqlglot/dialects/redshift.py\n+++ b/sqlglot/dialects/redshift.py\n@@ -175,6 +175,7 @@\n exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n+ exp.ParseJSON: rename_func(\"JSON_PARSE\"),\n exp.SafeConcat: concat_to_dpipe_sql,\n exp.Select: transforms.preprocess(\n [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]\n", "issue": "Erroneous handling of redshift's JSON_PARSE\n**sqlglot version: 18.16.1**\r\n\r\n**Fully reproducible code snippet**\r\n```python\r\nimport sqlglot\r\nsql = \"SELECT JSON_PARSE('[10001,10002,\\\"abc\\\"]');\"\r\nparsed = sqlglot.parse_one(sql,dialect=\"redshift\")\r\nparsed.sql(dialect=\"redshift\")\r\n#'SELECT PARSE_JSON(\\'[10001,10002,\"abc\"]\\')'\r\n```\r\nThe generated sql triggers an error when executed in redshift\r\n\r\n>Failed to execute query: ERROR: function parse_json(\"unknown\") does not exist\r\n Hint: No function matches the given name and argument types. You may need to add explicit type casts.\r\n\r\n**Official Documentation**\r\nhttps://docs.aws.amazon.com/redshift/latest/dg/JSON_PARSE.html\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, transforms\nfrom sqlglot.dialects.dialect import (\n concat_to_dpipe_sql,\n concat_ws_to_dpipe_sql,\n generatedasidentitycolumnconstraint_sql,\n rename_func,\n ts_or_ds_to_date_sql,\n)\nfrom sqlglot.dialects.postgres import Postgres\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _json_sql(self: Redshift.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:\n return f'{self.sql(expression, \"this\")}.\"{expression.expression.name}\"'\n\n\ndef _parse_date_add(args: t.List) -> exp.DateAdd:\n return exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n )\n\n\nclass Redshift(Postgres):\n # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = None\n\n SUPPORTS_USER_DEFINED_TYPES = False\n INDEX_OFFSET = 0\n\n TIME_FORMAT = \"'YYYY-MM-DD HH:MI:SS'\"\n TIME_MAPPING = {\n **Postgres.TIME_MAPPING,\n \"MON\": \"%b\",\n \"HH\": \"%H\",\n }\n\n class Parser(Postgres.Parser):\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"ADD_MONTHS\": lambda args: exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n expression=seq_get(args, 1),\n unit=exp.var(\"month\"),\n ),\n \"DATEADD\": _parse_date_add,\n \"DATE_ADD\": _parse_date_add,\n \"DATEDIFF\": lambda args: exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=exp.TsOrDsToDate(this=seq_get(args, 1)),\n unit=seq_get(args, 0),\n ),\n \"STRTOL\": exp.FromBase.from_arg_list,\n }\n\n NO_PAREN_FUNCTION_PARSERS = {\n **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,\n \"APPROXIMATE\": lambda self: self._parse_approximate_count(),\n }\n\n def _parse_table(\n self,\n schema: bool = False,\n joins: bool = False,\n alias_tokens: t.Optional[t.Collection[TokenType]] = None,\n parse_bracket: bool = False,\n ) -> t.Optional[exp.Expression]:\n # Redshift supports UNPIVOTing SUPER objects, e.g. 
`UNPIVOT foo.obj[0] AS val AT attr`\n unpivot = self._match(TokenType.UNPIVOT)\n table = super()._parse_table(\n schema=schema,\n joins=joins,\n alias_tokens=alias_tokens,\n parse_bracket=parse_bracket,\n )\n\n return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table\n\n def _parse_types(\n self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_types(\n check_func=check_func, schema=schema, allow_identifiers=allow_identifiers\n )\n\n if (\n isinstance(this, exp.DataType)\n and this.is_type(\"varchar\")\n and this.expressions\n and this.expressions[0].this == exp.column(\"MAX\")\n ):\n this.set(\"expressions\", [exp.var(\"MAX\")])\n\n return this\n\n def _parse_convert(\n self, strict: bool, safe: t.Optional[bool] = None\n ) -> t.Optional[exp.Expression]:\n to = self._parse_types()\n self._match(TokenType.COMMA)\n this = self._parse_bitwise()\n return self.expression(exp.TryCast, this=this, to=to, safe=safe)\n\n def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:\n index = self._index - 1\n func = self._parse_function()\n\n if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):\n return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))\n self._retreat(index)\n return None\n\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n STRING_ESCAPES = [\"\\\\\", \"'\"]\n\n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n \"HLLSKETCH\": TokenType.HLLSKETCH,\n \"SUPER\": TokenType.SUPER,\n \"SYSDATE\": TokenType.CURRENT_TIMESTAMP,\n \"TOP\": TokenType.TOP,\n \"UNLOAD\": TokenType.COMMAND,\n \"VARBYTE\": TokenType.VARBINARY,\n }\n\n # Redshift allows # to appear as a table identifier prefix\n SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()\n SINGLE_TOKENS.pop(\"#\")\n\n class Generator(Postgres.Generator):\n LOCKING_READS_SUPPORTED = False\n RENAME_TABLE_WITH_DB = False\n QUERY_HINTS = False\n VALUES_AS_TABLE = False\n TZ_TO_WITH_TIME_ZONE = True\n NVL2_SUPPORTED = True\n\n TYPE_MAPPING = {\n **Postgres.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"VARBYTE\",\n exp.DataType.Type.INT: \"INTEGER\",\n exp.DataType.Type.TIMETZ: \"TIME\",\n exp.DataType.Type.TIMESTAMPTZ: \"TIMESTAMP\",\n exp.DataType.Type.VARBINARY: \"VARBYTE\",\n }\n\n PROPERTIES_LOCATION = {\n **Postgres.Generator.PROPERTIES_LOCATION,\n exp.LikeProperty: exp.Properties.Location.POST_WITH,\n }\n\n TRANSFORMS = {\n **Postgres.Generator.TRANSFORMS,\n exp.Concat: concat_to_dpipe_sql,\n exp.ConcatWs: concat_ws_to_dpipe_sql,\n exp.ApproxDistinct: lambda self, e: f\"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})\",\n exp.CurrentTimestamp: lambda self, e: \"SYSDATE\",\n exp.DateAdd: lambda self, e: self.func(\n \"DATEADD\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DateDiff: lambda self, e: self.func(\n \"DATEDIFF\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n exp.FromBase: rename_func(\"STRTOL\"),\n exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.SafeConcat: concat_to_dpipe_sql,\n exp.Select: transforms.preprocess(\n [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]\n ),\n exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' 
if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n exp.TsOrDsToDate: ts_or_ds_to_date_sql(\"redshift\"),\n }\n\n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n TRANSFORMS.pop(exp.Pivot)\n\n # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)\n TRANSFORMS.pop(exp.Pow)\n\n # Redshift supports ANY_VALUE(..)\n TRANSFORMS.pop(exp.AnyValue)\n\n RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, \"snapshot\", \"type\"}\n\n def with_properties(self, properties: exp.Properties) -> str:\n \"\"\"Redshift doesn't have `WITH` as part of their with_properties so we remove it\"\"\"\n return self.properties(properties, prefix=\" \", suffix=\"\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n \"\"\"\n Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean\n VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type\n without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert\n `TEXT` to `VARCHAR`.\n \"\"\"\n if expression.is_type(\"text\"):\n expression = expression.copy()\n expression.set(\"this\", exp.DataType.Type.VARCHAR)\n precision = expression.args.get(\"expressions\")\n\n if not precision:\n expression.append(\"expressions\", exp.var(\"MAX\"))\n\n return super().datatype_sql(expression)\n", "path": "sqlglot/dialects/redshift.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, transforms\nfrom sqlglot.dialects.dialect import (\n concat_to_dpipe_sql,\n concat_ws_to_dpipe_sql,\n generatedasidentitycolumnconstraint_sql,\n rename_func,\n ts_or_ds_to_date_sql,\n)\nfrom sqlglot.dialects.postgres import Postgres\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _json_sql(self: Redshift.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:\n return f'{self.sql(expression, \"this\")}.\"{expression.expression.name}\"'\n\n\ndef _parse_date_add(args: t.List) -> exp.DateAdd:\n return exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n )\n\n\nclass Redshift(Postgres):\n # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = None\n\n SUPPORTS_USER_DEFINED_TYPES = False\n INDEX_OFFSET = 0\n\n TIME_FORMAT = \"'YYYY-MM-DD HH:MI:SS'\"\n TIME_MAPPING = {\n **Postgres.TIME_MAPPING,\n \"MON\": \"%b\",\n \"HH\": \"%H\",\n }\n\n class Parser(Postgres.Parser):\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"ADD_MONTHS\": lambda args: exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n expression=seq_get(args, 1),\n unit=exp.var(\"month\"),\n ),\n \"DATEADD\": _parse_date_add,\n \"DATE_ADD\": _parse_date_add,\n \"DATEDIFF\": lambda args: exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=exp.TsOrDsToDate(this=seq_get(args, 1)),\n unit=seq_get(args, 0),\n ),\n \"STRTOL\": exp.FromBase.from_arg_list,\n }\n\n NO_PAREN_FUNCTION_PARSERS = {\n **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,\n \"APPROXIMATE\": lambda self: self._parse_approximate_count(),\n }\n\n def _parse_table(\n self,\n schema: bool = False,\n joins: bool = False,\n alias_tokens: t.Optional[t.Collection[TokenType]] = None,\n parse_bracket: bool = False,\n ) -> t.Optional[exp.Expression]:\n # Redshift supports UNPIVOTing SUPER objects, e.g. 
`UNPIVOT foo.obj[0] AS val AT attr`\n unpivot = self._match(TokenType.UNPIVOT)\n table = super()._parse_table(\n schema=schema,\n joins=joins,\n alias_tokens=alias_tokens,\n parse_bracket=parse_bracket,\n )\n\n return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table\n\n def _parse_types(\n self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_types(\n check_func=check_func, schema=schema, allow_identifiers=allow_identifiers\n )\n\n if (\n isinstance(this, exp.DataType)\n and this.is_type(\"varchar\")\n and this.expressions\n and this.expressions[0].this == exp.column(\"MAX\")\n ):\n this.set(\"expressions\", [exp.var(\"MAX\")])\n\n return this\n\n def _parse_convert(\n self, strict: bool, safe: t.Optional[bool] = None\n ) -> t.Optional[exp.Expression]:\n to = self._parse_types()\n self._match(TokenType.COMMA)\n this = self._parse_bitwise()\n return self.expression(exp.TryCast, this=this, to=to, safe=safe)\n\n def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:\n index = self._index - 1\n func = self._parse_function()\n\n if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):\n return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))\n self._retreat(index)\n return None\n\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n STRING_ESCAPES = [\"\\\\\", \"'\"]\n\n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n \"HLLSKETCH\": TokenType.HLLSKETCH,\n \"SUPER\": TokenType.SUPER,\n \"SYSDATE\": TokenType.CURRENT_TIMESTAMP,\n \"TOP\": TokenType.TOP,\n \"UNLOAD\": TokenType.COMMAND,\n \"VARBYTE\": TokenType.VARBINARY,\n }\n\n # Redshift allows # to appear as a table identifier prefix\n SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()\n SINGLE_TOKENS.pop(\"#\")\n\n class Generator(Postgres.Generator):\n LOCKING_READS_SUPPORTED = False\n RENAME_TABLE_WITH_DB = False\n QUERY_HINTS = False\n VALUES_AS_TABLE = False\n TZ_TO_WITH_TIME_ZONE = True\n NVL2_SUPPORTED = True\n\n TYPE_MAPPING = {\n **Postgres.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"VARBYTE\",\n exp.DataType.Type.INT: \"INTEGER\",\n exp.DataType.Type.TIMETZ: \"TIME\",\n exp.DataType.Type.TIMESTAMPTZ: \"TIMESTAMP\",\n exp.DataType.Type.VARBINARY: \"VARBYTE\",\n }\n\n PROPERTIES_LOCATION = {\n **Postgres.Generator.PROPERTIES_LOCATION,\n exp.LikeProperty: exp.Properties.Location.POST_WITH,\n }\n\n TRANSFORMS = {\n **Postgres.Generator.TRANSFORMS,\n exp.Concat: concat_to_dpipe_sql,\n exp.ConcatWs: concat_ws_to_dpipe_sql,\n exp.ApproxDistinct: lambda self, e: f\"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})\",\n exp.CurrentTimestamp: lambda self, e: \"SYSDATE\",\n exp.DateAdd: lambda self, e: self.func(\n \"DATEADD\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DateDiff: lambda self, e: self.func(\n \"DATEDIFF\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n exp.FromBase: rename_func(\"STRTOL\"),\n exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.ParseJSON: rename_func(\"JSON_PARSE\"),\n exp.SafeConcat: concat_to_dpipe_sql,\n exp.Select: transforms.preprocess(\n [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]\n ),\n 
exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n exp.TsOrDsToDate: ts_or_ds_to_date_sql(\"redshift\"),\n }\n\n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n TRANSFORMS.pop(exp.Pivot)\n\n # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)\n TRANSFORMS.pop(exp.Pow)\n\n # Redshift supports ANY_VALUE(..)\n TRANSFORMS.pop(exp.AnyValue)\n\n RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, \"snapshot\", \"type\"}\n\n def with_properties(self, properties: exp.Properties) -> str:\n \"\"\"Redshift doesn't have `WITH` as part of their with_properties so we remove it\"\"\"\n return self.properties(properties, prefix=\" \", suffix=\"\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n \"\"\"\n Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean\n VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type\n without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert\n `TEXT` to `VARCHAR`.\n \"\"\"\n if expression.is_type(\"text\"):\n expression = expression.copy()\n expression.set(\"this\", exp.DataType.Type.VARCHAR)\n precision = expression.args.get(\"expressions\")\n\n if not precision:\n expression.append(\"expressions\", exp.var(\"MAX\"))\n\n return super().datatype_sql(expression)\n", "path": "sqlglot/dialects/redshift.py"}]}
| 2,942 | 149 |
gh_patches_debug_20828
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-619
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reading goal status doesn't set plurals correctly
When someone is only planning to read 1 book, the status should say "1 book" not "1 books"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/goal.py`
Content:
```
1 ''' non-interactive pages '''
2 from django.contrib.auth.decorators import login_required
3 from django.http import HttpResponseNotFound
4 from django.shortcuts import redirect
5 from django.template.response import TemplateResponse
6 from django.utils.decorators import method_decorator
7 from django.views import View
8
9 from bookwyrm import forms, models
10 from bookwyrm.status import create_generated_note
11 from .helpers import get_user_from_username, object_visible_to_user
12
13
14 # pylint: disable= no-self-use
15 @method_decorator(login_required, name='dispatch')
16 class Goal(View):
17 ''' track books for the year '''
18 def get(self, request, username, year):
19 ''' reading goal page '''
20 user = get_user_from_username(username)
21 year = int(year)
22 goal = models.AnnualGoal.objects.filter(
23 year=year, user=user
24 ).first()
25 if not goal and user != request.user:
26 return HttpResponseNotFound()
27
28 if goal and not object_visible_to_user(request.user, goal):
29 return HttpResponseNotFound()
30
31 data = {
32 'title': '%s\'s %d Reading' % (user.display_name, year),
33 'goal_form': forms.GoalForm(instance=goal),
34 'goal': goal,
35 'user': user,
36 'year': year,
37 'is_self': request.user == user,
38 }
39 return TemplateResponse(request, 'goal.html', data)
40
41
42 def post(self, request, username, year):
43 ''' update or create an annual goal '''
44 user = get_user_from_username(username)
45 if user != request.user:
46 return HttpResponseNotFound()
47
48 year = int(year)
49 goal = models.AnnualGoal.objects.filter(
50 year=year, user=request.user
51 ).first()
52 form = forms.GoalForm(request.POST, instance=goal)
53 if not form.is_valid():
54 data = {
55 'title': '%s\'s %d Reading' % (request.user.display_name, year),
56 'goal_form': form,
57 'goal': goal,
58 'year': year,
59 }
60 return TemplateResponse(request, 'goal.html', data)
61 goal = form.save()
62
63 if request.POST.get('post-status'):
64 # create status, if appropraite
65 create_generated_note(
66 request.user,
67 'set a goal to read %d books in %d' % (goal.goal, goal.year),
68 privacy=goal.privacy
69 )
70
71 return redirect(request.headers.get('Referer', '/'))
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/goal.py b/bookwyrm/views/goal.py
--- a/bookwyrm/views/goal.py
+++ b/bookwyrm/views/goal.py
@@ -2,6 +2,7 @@
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseNotFound
from django.shortcuts import redirect
+from django.template.loader import get_template
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
@@ -62,9 +63,10 @@
if request.POST.get('post-status'):
# create status, if appropraite
+ template = get_template('snippets/generated_status/goal.html')
create_generated_note(
request.user,
- 'set a goal to read %d books in %d' % (goal.goal, goal.year),
+ template.render({'goal': goal, 'user': request.user}).strip(),
privacy=goal.privacy
)
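The change moves the status text into `snippets/generated_status/goal.html` so the template can handle pluralization. As a rough Python-side equivalent (a sketch using Django's stock `pluralize` filter; the project's actual template may word it differently):

```python
# Minimal sketch of the pluralization the template is expected to handle;
# bookwyrm's real snippets/generated_status/goal.html may differ.
from types import SimpleNamespace

from django.template.defaultfilters import pluralize


def goal_status_text(goal):
    # pluralize(1) == "" and pluralize(2) == "s", so 1 -> "1 book", 2 -> "2 books"
    return f"set a goal to read {goal.goal} book{pluralize(goal.goal)} in {goal.year}"


print(goal_status_text(SimpleNamespace(goal=1, year=2021)))  # ... 1 book in 2021
print(goal_status_text(SimpleNamespace(goal=5, year=2021)))  # ... 5 books in 2021
```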
|
{"golden_diff": "diff --git a/bookwyrm/views/goal.py b/bookwyrm/views/goal.py\n--- a/bookwyrm/views/goal.py\n+++ b/bookwyrm/views/goal.py\n@@ -2,6 +2,7 @@\n from django.contrib.auth.decorators import login_required\n from django.http import HttpResponseNotFound\n from django.shortcuts import redirect\n+from django.template.loader import get_template\n from django.template.response import TemplateResponse\n from django.utils.decorators import method_decorator\n from django.views import View\n@@ -62,9 +63,10 @@\n \n if request.POST.get('post-status'):\n # create status, if appropraite\n+ template = get_template('snippets/generated_status/goal.html')\n create_generated_note(\n request.user,\n- 'set a goal to read %d books in %d' % (goal.goal, goal.year),\n+ template.render({'goal': goal, 'user': request.user}).strip(),\n privacy=goal.privacy\n )\n", "issue": "Reading goal status doesn't set plurals correctly\nWhen someone is only planning to read 1 book, the status should say \"1 book\" not \"1 books\"\n", "before_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.status import create_generated_note\nfrom .helpers import get_user_from_username, object_visible_to_user\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass Goal(View):\n ''' track books for the year '''\n def get(self, request, username, year):\n ''' reading goal page '''\n user = get_user_from_username(username)\n year = int(year)\n goal = models.AnnualGoal.objects.filter(\n year=year, user=user\n ).first()\n if not goal and user != request.user:\n return HttpResponseNotFound()\n\n if goal and not object_visible_to_user(request.user, goal):\n return HttpResponseNotFound()\n\n data = {\n 'title': '%s\\'s %d Reading' % (user.display_name, year),\n 'goal_form': forms.GoalForm(instance=goal),\n 'goal': goal,\n 'user': user,\n 'year': year,\n 'is_self': request.user == user,\n }\n return TemplateResponse(request, 'goal.html', data)\n\n\n def post(self, request, username, year):\n ''' update or create an annual goal '''\n user = get_user_from_username(username)\n if user != request.user:\n return HttpResponseNotFound()\n\n year = int(year)\n goal = models.AnnualGoal.objects.filter(\n year=year, user=request.user\n ).first()\n form = forms.GoalForm(request.POST, instance=goal)\n if not form.is_valid():\n data = {\n 'title': '%s\\'s %d Reading' % (request.user.display_name, year),\n 'goal_form': form,\n 'goal': goal,\n 'year': year,\n }\n return TemplateResponse(request, 'goal.html', data)\n goal = form.save()\n\n if request.POST.get('post-status'):\n # create status, if appropraite\n create_generated_note(\n request.user,\n 'set a goal to read %d books in %d' % (goal.goal, goal.year),\n privacy=goal.privacy\n )\n\n return redirect(request.headers.get('Referer', '/'))\n", "path": "bookwyrm/views/goal.py"}], "after_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import redirect\nfrom django.template.loader import get_template\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import 
method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.status import create_generated_note\nfrom .helpers import get_user_from_username, object_visible_to_user\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass Goal(View):\n ''' track books for the year '''\n def get(self, request, username, year):\n ''' reading goal page '''\n user = get_user_from_username(username)\n year = int(year)\n goal = models.AnnualGoal.objects.filter(\n year=year, user=user\n ).first()\n if not goal and user != request.user:\n return HttpResponseNotFound()\n\n if goal and not object_visible_to_user(request.user, goal):\n return HttpResponseNotFound()\n\n data = {\n 'title': '%s\\'s %d Reading' % (user.display_name, year),\n 'goal_form': forms.GoalForm(instance=goal),\n 'goal': goal,\n 'user': user,\n 'year': year,\n 'is_self': request.user == user,\n }\n return TemplateResponse(request, 'goal.html', data)\n\n\n def post(self, request, username, year):\n ''' update or create an annual goal '''\n user = get_user_from_username(username)\n if user != request.user:\n return HttpResponseNotFound()\n\n year = int(year)\n goal = models.AnnualGoal.objects.filter(\n year=year, user=request.user\n ).first()\n form = forms.GoalForm(request.POST, instance=goal)\n if not form.is_valid():\n data = {\n 'title': '%s\\'s %d Reading' % (request.user.display_name, year),\n 'goal_form': form,\n 'goal': goal,\n 'year': year,\n }\n return TemplateResponse(request, 'goal.html', data)\n goal = form.save()\n\n if request.POST.get('post-status'):\n # create status, if appropraite\n template = get_template('snippets/generated_status/goal.html')\n create_generated_note(\n request.user,\n template.render({'goal': goal, 'user': request.user}).strip(),\n privacy=goal.privacy\n )\n\n return redirect(request.headers.get('Referer', '/'))\n", "path": "bookwyrm/views/goal.py"}]}
| 949 | 209 |
gh_patches_debug_32496
|
rasdani/github-patches
|
git_diff
|
beeware__toga-2348
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`toga.Image()` doesn't accept `None` as a value for `src`
### Describe the bug
`toga.Image()` doesn't accept `None` as a value for `src`, but according to the docs and code: https://github.com/beeware/toga/blob/61dff563d82da71bd2470cde27882731113eda2d/core/src/toga/images.py#L42C9-L42C41 it should accept `None` as a value.
### Steps to reproduce
1. Clone latest toga/main branch
2. `pip install -e ./core -e ./dummy -e ./gtk`
3. Create a new briefcase project
4. Use the following `app.py` script:
```
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
main_box = toga.Box()
img = toga.Image(src=None)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def main():
return HelloWorld()
```
5. `briefcase dev`
6. Error encountered:
```
[helloworld] Starting in dev mode...
===========================================================================
Traceback (most recent call last):
File "/home/proneon267/Pictures/venv/lib/python3.11/site-packages/toga_gtk/app.py", line 87, in gtk_startup
self.interface._startup()
File "/home/proneon267/Pictures/venv/lib/python3.11/site-packages/toga/app.py", line 629, in _startup
self.startup()
File "/home/proneon267/Pictures/toga/helloworld/src/helloworld/app.py", line 20, in startup
img = toga.Image(src=None)
^^^^^^^^^^^^^^^^^^^^
File "/home/proneon267/Pictures/venv/lib/python3.11/site-packages/toga/images.py", line 63, in __init__
raise TypeError(
TypeError: Image.__init__() missing 1 required positional argument: 'src'
```
### Expected behavior
`toga.Image()` should accept `None` as a value for `src`
### Screenshots
_No response_
### Environment
- Operating System: Arch
- Python version: 3.11.6
- Software versions:
- Briefcase: 0.3.16
- Toga: latest
- ...
### Logs
```
No log files were generated.
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/src/toga/images.py`
Content:
```
1 from __future__ import annotations
2
3 import sys
4 import warnings
5 from io import BytesIO
6 from pathlib import Path
7 from typing import TYPE_CHECKING, Any
8 from warnings import warn
9
10 try:
11 import PIL.Image
12
13 PIL_imported = True
14 except ImportError: # pragma: no cover
15 PIL_imported = False
16
17 import toga
18 from toga.platform import get_platform_factory
19
20 # Make sure deprecation warnings are shown by default
21 warnings.filterwarnings("default", category=DeprecationWarning)
22
23 if TYPE_CHECKING:
24 if sys.version_info < (3, 10):
25 from typing_extensions import TypeAlias, TypeVar
26 else:
27 from typing import TypeAlias, TypeVar
28
29 # Define a type variable for generics where an Image type is required.
30 ImageT = TypeVar("ImageT")
31
32 # Define the types that can be used as Image content
33 PathLike: TypeAlias = str | Path
34 BytesLike: TypeAlias = bytes | bytearray | memoryview
35 ImageLike: TypeAlias = Any
36 ImageContent: TypeAlias = PathLike | BytesLike | ImageLike
37
38
39 class Image:
40 def __init__(
41 self,
42 src: ImageContent | None = None,
43 *,
44 path=None, # DEPRECATED
45 data=None, # DEPRECATED
46 ):
47 """Create a new image.
48
49 :param src: The source from which to load the image. Can be any valid
50 :any:`image content <ImageContent>` type.
51 :param path: **DEPRECATED** - Use ``src``.
52 :param data: **DEPRECATED** - Use ``src``.
53 :raises FileNotFoundError: If a path is provided, but that path does not exist.
54 :raises ValueError: If the source cannot be loaded as an image.
55 """
56 ######################################################################
57 # 2023-11: Backwards compatibility
58 ######################################################################
59 num_provided = sum(arg is not None for arg in (src, path, data))
60 if num_provided > 1:
61 raise ValueError("Received multiple arguments to constructor.")
62 if num_provided == 0:
63 raise TypeError(
64 "Image.__init__() missing 1 required positional argument: 'src'"
65 )
66 if path is not None:
67 src = path
68 warn(
69 "Path argument is deprecated, use src instead.",
70 DeprecationWarning,
71 stacklevel=2,
72 )
73 elif data is not None:
74 src = data
75 warn(
76 "Data argument is deprecated, use src instead.",
77 DeprecationWarning,
78 stacklevel=2,
79 )
80 ######################################################################
81 # End backwards compatibility
82 ######################################################################
83
84 self.factory = get_platform_factory()
85 self._path = None
86
87 # Any "lump of bytes" should be valid here.
88 if isinstance(src, (bytes, bytearray, memoryview)):
89 self._impl = self.factory.Image(interface=self, data=src)
90
91 elif isinstance(src, (str, Path)):
92 self._path = toga.App.app.paths.app / src
93 if not self._path.is_file():
94 raise FileNotFoundError(f"Image file {self._path} does not exist")
95 self._impl = self.factory.Image(interface=self, path=self._path)
96
97 elif isinstance(src, Image):
98 self._impl = self.factory.Image(interface=self, data=src.data)
99
100 elif PIL_imported and isinstance(src, PIL.Image.Image):
101 buffer = BytesIO()
102 src.save(buffer, format="png", compress_level=0)
103 self._impl = self.factory.Image(interface=self, data=buffer.getvalue())
104
105 elif isinstance(src, self.factory.Image.RAW_TYPE):
106 self._impl = self.factory.Image(interface=self, raw=src)
107
108 else:
109 raise TypeError("Unsupported source type for Image")
110
111 @property
112 def size(self) -> (int, int):
113 """The size of the image, as a (width, height) tuple."""
114 return (self._impl.get_width(), self._impl.get_height())
115
116 @property
117 def width(self) -> int:
118 """The width of the image, in pixels."""
119 return self._impl.get_width()
120
121 @property
122 def height(self) -> int:
123 """The height of the image, in pixels."""
124 return self._impl.get_height()
125
126 @property
127 def data(self) -> bytes:
128 """The raw data for the image, in PNG format."""
129 return self._impl.get_data()
130
131 @property
132 def path(self) -> Path | None:
133 """The path from which the image was opened, if any (or None)."""
134 return self._path
135
136 def save(self, path: str | Path) -> None:
137 """Save image to given path.
138
139 The file format of the saved image will be determined by the extension of
140 the filename provided (e.g ``path/to/mypicture.png`` will save a PNG file).
141
142 :param path: Path to save the image to.
143 """
144 self._impl.save(path)
145
146 def as_format(self, format: type[ImageT]) -> ImageT:
147 """Return the image, converted to the image format specified.
148
149 :param format: The image class to return. Currently supports only :any:`Image`,
150 and :any:`PIL.Image.Image` if Pillow is installed.
151 :returns: The image in the requested format
152 :raises TypeError: If the format supplied is not recognized.
153 """
154 if isinstance(format, type) and issubclass(format, Image):
155 return format(self.data)
156
157 if PIL_imported and format is PIL.Image.Image:
158 buffer = BytesIO(self.data)
159 with PIL.Image.open(buffer) as pil_image:
160 pil_image.load()
161 return pil_image
162
163 raise TypeError(f"Unknown conversion format for Image: {format}")
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/src/toga/images.py b/core/src/toga/images.py
--- a/core/src/toga/images.py
+++ b/core/src/toga/images.py
@@ -36,13 +36,16 @@
ImageContent: TypeAlias = PathLike | BytesLike | ImageLike
+NOT_PROVIDED = object()
+
+
class Image:
def __init__(
self,
- src: ImageContent | None = None,
+ src: ImageContent = NOT_PROVIDED,
*,
- path=None, # DEPRECATED
- data=None, # DEPRECATED
+ path=NOT_PROVIDED, # DEPRECATED
+ data=NOT_PROVIDED, # DEPRECATED
):
"""Create a new image.
@@ -56,21 +59,21 @@
######################################################################
# 2023-11: Backwards compatibility
######################################################################
- num_provided = sum(arg is not None for arg in (src, path, data))
+ num_provided = sum(arg is not NOT_PROVIDED for arg in (src, path, data))
if num_provided > 1:
raise ValueError("Received multiple arguments to constructor.")
if num_provided == 0:
raise TypeError(
"Image.__init__() missing 1 required positional argument: 'src'"
)
- if path is not None:
+ if path is not NOT_PROVIDED:
src = path
warn(
"Path argument is deprecated, use src instead.",
DeprecationWarning,
stacklevel=2,
)
- elif data is not None:
+ elif data is not NOT_PROVIDED:
src = data
warn(
"Data argument is deprecated, use src instead.",
|
{"golden_diff": "diff --git a/core/src/toga/images.py b/core/src/toga/images.py\n--- a/core/src/toga/images.py\n+++ b/core/src/toga/images.py\n@@ -36,13 +36,16 @@\n ImageContent: TypeAlias = PathLike | BytesLike | ImageLike\n \n \n+NOT_PROVIDED = object()\n+\n+\n class Image:\n def __init__(\n self,\n- src: ImageContent | None = None,\n+ src: ImageContent = NOT_PROVIDED,\n *,\n- path=None, # DEPRECATED\n- data=None, # DEPRECATED\n+ path=NOT_PROVIDED, # DEPRECATED\n+ data=NOT_PROVIDED, # DEPRECATED\n ):\n \"\"\"Create a new image.\n \n@@ -56,21 +59,21 @@\n ######################################################################\n # 2023-11: Backwards compatibility\n ######################################################################\n- num_provided = sum(arg is not None for arg in (src, path, data))\n+ num_provided = sum(arg is not NOT_PROVIDED for arg in (src, path, data))\n if num_provided > 1:\n raise ValueError(\"Received multiple arguments to constructor.\")\n if num_provided == 0:\n raise TypeError(\n \"Image.__init__() missing 1 required positional argument: 'src'\"\n )\n- if path is not None:\n+ if path is not NOT_PROVIDED:\n src = path\n warn(\n \"Path argument is deprecated, use src instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n- elif data is not None:\n+ elif data is not NOT_PROVIDED:\n src = data\n warn(\n \"Data argument is deprecated, use src instead.\",\n", "issue": "`toga.Image()` doesn't accept `None` as a value for `src`\n### Describe the bug\n\n`toga.Image()` doesn't accept `None` as a value for `src`, but according to the docs and code: https://github.com/beeware/toga/blob/61dff563d82da71bd2470cde27882731113eda2d/core/src/toga/images.py#L42C9-L42C41 it should accept `None` as a value.\n\n### Steps to reproduce\n\n1. Clone latest toga/main branch\r\n2. `pip install -e ./core -e ./dummy -e ./gtk`\r\n3. Create a new briefcase project\r\n4. Use the following `app.py` script:\r\n```\r\n\"\"\"\r\nMy first application\r\n\"\"\"\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\n\r\nclass HelloWorld(toga.App):\r\n\r\n def startup(self):\r\n \"\"\"\r\n Construct and show the Toga application.\r\n\r\n Usually, you would add your application to a main content box.\r\n We then create a main window (with a name matching the app), and\r\n show the main window.\r\n \"\"\"\r\n main_box = toga.Box()\r\n img = toga.Image(src=None)\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = main_box\r\n self.main_window.show()\r\n\r\n\r\ndef main():\r\n return HelloWorld()\r\n```\r\n\r\n5. `briefcase dev`\r\n6. 
Error encountered:\r\n```\r\n\r\n[helloworld] Starting in dev mode...\r\n===========================================================================\r\nTraceback (most recent call last):\r\n File \"/home/proneon267/Pictures/venv/lib/python3.11/site-packages/toga_gtk/app.py\", line 87, in gtk_startup\r\n self.interface._startup()\r\n File \"/home/proneon267/Pictures/venv/lib/python3.11/site-packages/toga/app.py\", line 629, in _startup\r\n self.startup()\r\n File \"/home/proneon267/Pictures/toga/helloworld/src/helloworld/app.py\", line 20, in startup\r\n img = toga.Image(src=None)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/proneon267/Pictures/venv/lib/python3.11/site-packages/toga/images.py\", line 63, in __init__\r\n raise TypeError(\r\nTypeError: Image.__init__() missing 1 required positional argument: 'src'\r\n```\r\n\n\n### Expected behavior\n\n`toga.Image()` should accept `None` as a value for `src`\n\n### Screenshots\n\n_No response_\n\n### Environment\n\n- Operating System: Arch\r\n- Python version: 3.11.6\r\n- Software versions:\r\n - Briefcase: 0.3.16\r\n - Toga: latest\r\n - ...\r\n\n\n### Logs\n\n```\r\nNo log files were generated.\r\n```\r\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nimport warnings\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any\nfrom warnings import warn\n\ntry:\n import PIL.Image\n\n PIL_imported = True\nexcept ImportError: # pragma: no cover\n PIL_imported = False\n\nimport toga\nfrom toga.platform import get_platform_factory\n\n# Make sure deprecation warnings are shown by default\nwarnings.filterwarnings(\"default\", category=DeprecationWarning)\n\nif TYPE_CHECKING:\n if sys.version_info < (3, 10):\n from typing_extensions import TypeAlias, TypeVar\n else:\n from typing import TypeAlias, TypeVar\n\n # Define a type variable for generics where an Image type is required.\n ImageT = TypeVar(\"ImageT\")\n\n # Define the types that can be used as Image content\n PathLike: TypeAlias = str | Path\n BytesLike: TypeAlias = bytes | bytearray | memoryview\n ImageLike: TypeAlias = Any\n ImageContent: TypeAlias = PathLike | BytesLike | ImageLike\n\n\nclass Image:\n def __init__(\n self,\n src: ImageContent | None = None,\n *,\n path=None, # DEPRECATED\n data=None, # DEPRECATED\n ):\n \"\"\"Create a new image.\n\n :param src: The source from which to load the image. 
Can be any valid\n :any:`image content <ImageContent>` type.\n :param path: **DEPRECATED** - Use ``src``.\n :param data: **DEPRECATED** - Use ``src``.\n :raises FileNotFoundError: If a path is provided, but that path does not exist.\n :raises ValueError: If the source cannot be loaded as an image.\n \"\"\"\n ######################################################################\n # 2023-11: Backwards compatibility\n ######################################################################\n num_provided = sum(arg is not None for arg in (src, path, data))\n if num_provided > 1:\n raise ValueError(\"Received multiple arguments to constructor.\")\n if num_provided == 0:\n raise TypeError(\n \"Image.__init__() missing 1 required positional argument: 'src'\"\n )\n if path is not None:\n src = path\n warn(\n \"Path argument is deprecated, use src instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n elif data is not None:\n src = data\n warn(\n \"Data argument is deprecated, use src instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n ######################################################################\n # End backwards compatibility\n ######################################################################\n\n self.factory = get_platform_factory()\n self._path = None\n\n # Any \"lump of bytes\" should be valid here.\n if isinstance(src, (bytes, bytearray, memoryview)):\n self._impl = self.factory.Image(interface=self, data=src)\n\n elif isinstance(src, (str, Path)):\n self._path = toga.App.app.paths.app / src\n if not self._path.is_file():\n raise FileNotFoundError(f\"Image file {self._path} does not exist\")\n self._impl = self.factory.Image(interface=self, path=self._path)\n\n elif isinstance(src, Image):\n self._impl = self.factory.Image(interface=self, data=src.data)\n\n elif PIL_imported and isinstance(src, PIL.Image.Image):\n buffer = BytesIO()\n src.save(buffer, format=\"png\", compress_level=0)\n self._impl = self.factory.Image(interface=self, data=buffer.getvalue())\n\n elif isinstance(src, self.factory.Image.RAW_TYPE):\n self._impl = self.factory.Image(interface=self, raw=src)\n\n else:\n raise TypeError(\"Unsupported source type for Image\")\n\n @property\n def size(self) -> (int, int):\n \"\"\"The size of the image, as a (width, height) tuple.\"\"\"\n return (self._impl.get_width(), self._impl.get_height())\n\n @property\n def width(self) -> int:\n \"\"\"The width of the image, in pixels.\"\"\"\n return self._impl.get_width()\n\n @property\n def height(self) -> int:\n \"\"\"The height of the image, in pixels.\"\"\"\n return self._impl.get_height()\n\n @property\n def data(self) -> bytes:\n \"\"\"The raw data for the image, in PNG format.\"\"\"\n return self._impl.get_data()\n\n @property\n def path(self) -> Path | None:\n \"\"\"The path from which the image was opened, if any (or None).\"\"\"\n return self._path\n\n def save(self, path: str | Path) -> None:\n \"\"\"Save image to given path.\n\n The file format of the saved image will be determined by the extension of\n the filename provided (e.g ``path/to/mypicture.png`` will save a PNG file).\n\n :param path: Path to save the image to.\n \"\"\"\n self._impl.save(path)\n\n def as_format(self, format: type[ImageT]) -> ImageT:\n \"\"\"Return the image, converted to the image format specified.\n\n :param format: The image class to return. 
Currently supports only :any:`Image`,\n and :any:`PIL.Image.Image` if Pillow is installed.\n :returns: The image in the requested format\n :raises TypeError: If the format supplied is not recognized.\n \"\"\"\n if isinstance(format, type) and issubclass(format, Image):\n return format(self.data)\n\n if PIL_imported and format is PIL.Image.Image:\n buffer = BytesIO(self.data)\n with PIL.Image.open(buffer) as pil_image:\n pil_image.load()\n return pil_image\n\n raise TypeError(f\"Unknown conversion format for Image: {format}\")\n", "path": "core/src/toga/images.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport sys\nimport warnings\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any\nfrom warnings import warn\n\ntry:\n import PIL.Image\n\n PIL_imported = True\nexcept ImportError: # pragma: no cover\n PIL_imported = False\n\nimport toga\nfrom toga.platform import get_platform_factory\n\n# Make sure deprecation warnings are shown by default\nwarnings.filterwarnings(\"default\", category=DeprecationWarning)\n\nif TYPE_CHECKING:\n if sys.version_info < (3, 10):\n from typing_extensions import TypeAlias, TypeVar\n else:\n from typing import TypeAlias, TypeVar\n\n # Define a type variable for generics where an Image type is required.\n ImageT = TypeVar(\"ImageT\")\n\n # Define the types that can be used as Image content\n PathLike: TypeAlias = str | Path\n BytesLike: TypeAlias = bytes | bytearray | memoryview\n ImageLike: TypeAlias = Any\n ImageContent: TypeAlias = PathLike | BytesLike | ImageLike\n\n\nNOT_PROVIDED = object()\n\n\nclass Image:\n def __init__(\n self,\n src: ImageContent = NOT_PROVIDED,\n *,\n path=NOT_PROVIDED, # DEPRECATED\n data=NOT_PROVIDED, # DEPRECATED\n ):\n \"\"\"Create a new image.\n\n :param src: The source from which to load the image. 
Can be any valid\n :any:`image content <ImageContent>` type.\n :param path: **DEPRECATED** - Use ``src``.\n :param data: **DEPRECATED** - Use ``src``.\n :raises FileNotFoundError: If a path is provided, but that path does not exist.\n :raises ValueError: If the source cannot be loaded as an image.\n \"\"\"\n ######################################################################\n # 2023-11: Backwards compatibility\n ######################################################################\n num_provided = sum(arg is not NOT_PROVIDED for arg in (src, path, data))\n if num_provided > 1:\n raise ValueError(\"Received multiple arguments to constructor.\")\n if num_provided == 0:\n raise TypeError(\n \"Image.__init__() missing 1 required positional argument: 'src'\"\n )\n if path is not NOT_PROVIDED:\n src = path\n warn(\n \"Path argument is deprecated, use src instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n elif data is not NOT_PROVIDED:\n src = data\n warn(\n \"Data argument is deprecated, use src instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n ######################################################################\n # End backwards compatibility\n ######################################################################\n\n self.factory = get_platform_factory()\n self._path = None\n\n # Any \"lump of bytes\" should be valid here.\n if isinstance(src, (bytes, bytearray, memoryview)):\n self._impl = self.factory.Image(interface=self, data=src)\n\n elif isinstance(src, (str, Path)):\n self._path = toga.App.app.paths.app / src\n if not self._path.is_file():\n raise FileNotFoundError(f\"Image file {self._path} does not exist\")\n self._impl = self.factory.Image(interface=self, path=self._path)\n\n elif isinstance(src, Image):\n self._impl = self.factory.Image(interface=self, data=src.data)\n\n elif PIL_imported and isinstance(src, PIL.Image.Image):\n buffer = BytesIO()\n src.save(buffer, format=\"png\", compress_level=0)\n self._impl = self.factory.Image(interface=self, data=buffer.getvalue())\n\n elif isinstance(src, self.factory.Image.RAW_TYPE):\n self._impl = self.factory.Image(interface=self, raw=src)\n\n else:\n raise TypeError(\"Unsupported source type for Image\")\n\n @property\n def size(self) -> (int, int):\n \"\"\"The size of the image, as a (width, height) tuple.\"\"\"\n return (self._impl.get_width(), self._impl.get_height())\n\n @property\n def width(self) -> int:\n \"\"\"The width of the image, in pixels.\"\"\"\n return self._impl.get_width()\n\n @property\n def height(self) -> int:\n \"\"\"The height of the image, in pixels.\"\"\"\n return self._impl.get_height()\n\n @property\n def data(self) -> bytes:\n \"\"\"The raw data for the image, in PNG format.\"\"\"\n return self._impl.get_data()\n\n @property\n def path(self) -> Path | None:\n \"\"\"The path from which the image was opened, if any (or None).\"\"\"\n return self._path\n\n def save(self, path: str | Path) -> None:\n \"\"\"Save image to given path.\n\n The file format of the saved image will be determined by the extension of\n the filename provided (e.g ``path/to/mypicture.png`` will save a PNG file).\n\n :param path: Path to save the image to.\n \"\"\"\n self._impl.save(path)\n\n def as_format(self, format: type[ImageT]) -> ImageT:\n \"\"\"Return the image, converted to the image format specified.\n\n :param format: The image class to return. 
Currently supports only :any:`Image`,\n and :any:`PIL.Image.Image` if Pillow is installed.\n :returns: The image in the requested format\n :raises TypeError: If the format supplied is not recognized.\n \"\"\"\n if isinstance(format, type) and issubclass(format, Image):\n return format(self.data)\n\n if PIL_imported and format is PIL.Image.Image:\n buffer = BytesIO(self.data)\n with PIL.Image.open(buffer) as pil_image:\n pil_image.load()\n return pil_image\n\n raise TypeError(f\"Unknown conversion format for Image: {format}\")\n", "path": "core/src/toga/images.py"}]}
| 2,541 | 384 |
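The record above centres on toga's `Image.as_format()` conversion helper, which dispatches on the requested target class. The snippet below is a minimal, self-contained sketch of that same issubclass-dispatch pattern; `FakeImage`, `AnotherImage`, and the byte payload are illustrative stand-ins, not toga's real classes, so it runs without toga or Pillow installed.

```python
# Standalone sketch of the format-dispatch pattern used by Image.as_format()
# in the record above. The classes are stand-ins, not toga's real types.

class FakeImage:
    """Stand-in for an image type that can be rebuilt from raw bytes."""
    def __init__(self, data: bytes):
        self.data = data

class AnotherImage(FakeImage):
    """A second image type, to exercise the conversion path."""

def as_format(image: FakeImage, format: type) -> FakeImage:
    """Return `image` converted to the requested class, mirroring the
    isinstance/issubclass dispatch shown in the record's as_format()."""
    if isinstance(format, type) and issubclass(format, FakeImage):
        return format(image.data)
    raise TypeError(f"Unknown conversion format for Image: {format}")

if __name__ == "__main__":
    src = FakeImage(b"\x89PNG...")       # pretend PNG payload
    dst = as_format(src, AnotherImage)   # convert by re-wrapping the bytes
    print(type(dst).__name__, len(dst.data))
```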
gh_patches_debug_33024
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1872
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PaymentError: This payment has already been processed and hence cannot be deleted.
Sentry Issue: [CONCREXIT-8C](https://sentry.io/organizations/thalia/issues/2568657203/?referrer=github_integration)
```
PaymentError: This payment has already been processed and hence cannot be deleted.
(1 additional frame(s) were not displayed)
...
File "django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "django/views/decorators/http.py", line 40, in inner
return func(request, *args, **kwargs)
File "pizzas/views.py", line 38, in cancel_order
order.delete()
File "pizzas/models.py", line 256, in delete
delete_payment(self)
File "payments/services.py", line 98, in delete_payment
raise PaymentError(
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/pizzas/api/v2/views.py`
Content:
```
1 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
2 from rest_framework.generics import (
3 ListAPIView,
4 RetrieveAPIView,
5 get_object_or_404,
6 CreateAPIView,
7 DestroyAPIView,
8 UpdateAPIView,
9 )
10
11 from rest_framework import filters as framework_filters, status
12 from rest_framework.response import Response
13
14 from payments.exceptions import PaymentError
15 from payments.services import delete_payment
16 from pizzas.api.v2 import filters
17 from pizzas.api.v2.serializers import (
18 ProductSerializer,
19 FoodOrderSerializer,
20 FoodOrderUpdateSerializer,
21 FoodOrderCreateSerializer,
22 )
23 from pizzas.api.v2.serializers.food_event import FoodEventSerializer
24 from pizzas.models import FoodEvent, Product, FoodOrder
25 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
26
27
28 class FoodEventListView(ListAPIView):
29 """Returns an overview of all food events."""
30
31 serializer_class = FoodEventSerializer
32 queryset = FoodEvent.objects.all()
33 filter_backends = (
34 framework_filters.OrderingFilter,
35 filters.FoodEventDateFilterBackend,
36 )
37 ordering_fields = ("start", "end")
38 permission_classes = [
39 IsAuthenticatedOrTokenHasScope,
40 ]
41 required_scopes = ["food:read"]
42
43
44 class FoodEventDetailView(RetrieveAPIView):
45 """Returns one single food event."""
46
47 serializer_class = FoodEventSerializer
48 queryset = FoodEvent.objects.all()
49 permission_classes = [
50 IsAuthenticatedOrTokenHasScope,
51 ]
52 required_scopes = ["food:read"]
53
54
55 class FoodEventProductsListView(ListAPIView):
56 """Returns an overview of all products."""
57
58 serializer_class = ProductSerializer
59 queryset = Product.available_products.all()
60 filter_backends = (framework_filters.SearchFilter,)
61 search_fields = ("name",)
62 permission_classes = [
63 IsAuthenticatedOrTokenHasScope,
64 ]
65 required_scopes = ["food:read"]
66
67
68 class FoodEventOrderDetailView(
69 RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
70 ):
71 """Returns details of a food order."""
72
73 permission_classes = [
74 IsAuthenticatedOrTokenHasScopeForMethod,
75 ]
76 required_scopes_per_method = {
77 "GET": ["food:read"],
78 "POST": ["food:order"],
79 "PUT": ["food:order"],
80 "PATCH": ["food:order"],
81 "DELETE": ["food:order"],
82 }
83
84 def get_serializer_class(self):
85 if self.request.method.lower() == "get":
86 return FoodOrderSerializer
87 if self.request.method.lower() == "post":
88 return FoodOrderCreateSerializer
89 return FoodOrderUpdateSerializer
90
91 def get_queryset(self):
92 return FoodOrder.objects.filter(food_event=self.food_event)
93
94 def get_object(self):
95 queryset = self.filter_queryset(self.get_queryset())
96 obj = get_object_or_404(queryset, member=self.request.member)
97
98 # May raise a permission denied
99 self.check_object_permissions(self.request, obj)
100
101 return obj
102
103 def dispatch(self, request, *args, **kwargs):
104 self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get("pk"))
105 return super().dispatch(request, *args, **kwargs)
106
107 def update(self, request, *args, **kwargs):
108 super().update(request, *args, **kwargs)
109 instance = self.get_object()
110
111 if instance.payment:
112 try:
113 delete_payment(
114 instance, member=request.member, ignore_change_window=True
115 )
116 except PaymentError:
117 return Response(
118 "Your order could not be updated because it was already paid.",
119 status=status.HTTP_403_FORBIDDEN,
120 )
121
122 return Response(
123 FoodOrderSerializer(instance, context=self.get_serializer_context()).data
124 )
125
126 def create(self, request, *args, **kwargs):
127 serializer = self.get_serializer(data=request.data)
128 serializer.is_valid(raise_exception=True)
129 self.perform_create(serializer)
130 return Response(
131 FoodOrderSerializer(
132 serializer.instance, context=self.get_serializer_context()
133 ).data,
134 status=status.HTTP_201_CREATED,
135 )
136
```
Path: `website/pizzas/views.py`
Content:
```
1 """Views provided by the pizzas package."""
2 from django.contrib import messages
3 from django.contrib.auth.decorators import login_required
4 from django.http import Http404
5 from django.shortcuts import get_object_or_404, render, redirect
6 from django.utils.translation import gettext_lazy as _
7 from django.views.decorators.http import require_http_methods
8
9 from payments.exceptions import PaymentError
10 from payments.services import delete_payment
11 from .models import FoodOrder, FoodEvent, Product
12
13
14 @login_required
15 def index(request):
16 """Overview of user order for a pizza event."""
17 products = Product.available_products.order_by("name")
18 if not request.user.has_perm("pizzas.order_restricted_products"):
19 products = products.exclude(restricted=True)
20 event = FoodEvent.current()
21 try:
22 obj = FoodOrder.objects.get(food_event=event, member=request.member)
23 except FoodOrder.DoesNotExist:
24 obj = None
25 context = {"event": event, "products": products, "order": obj}
26 return render(request, "pizzas/index.html", context)
27
28
29 @require_http_methods(["POST"])
30 def cancel_order(request):
31 """View that cancels a user's order."""
32 if "order" in request.POST:
33 try:
34 order = get_object_or_404(FoodOrder, pk=int(request.POST["order"]))
35 if not order.can_be_changed:
36 messages.error(request, _("You can no longer cancel."))
37 elif order.member == request.member:
38 order.delete()
39 messages.success(request, _("Your order has been cancelled."))
40 except Http404:
41 messages.error(request, _("Your order could not be found."))
42 return redirect("pizzas:index")
43
44
45 @login_required
46 def place_order(request):
47 """View that shows the detail of the current order."""
48 event = FoodEvent.current()
49 if not event:
50 return redirect("pizzas:index")
51
52 try:
53 obj = FoodOrder.objects.get(food_event=event, member=request.member)
54 current_order_locked = not obj.can_be_changed
55 except FoodOrder.DoesNotExist:
56 obj = None
57 current_order_locked = False
58
59 if "product" in request.POST and not current_order_locked:
60 productset = Product.available_products.all()
61 if not request.user.has_perm("pizzas.order_restricted_products"):
62 productset = productset.exclude(restricted=True)
63 try:
64 product = productset.get(pk=int(request.POST["product"]))
65 except Product.DoesNotExist as e:
66 raise Http404("Pizza does not exist") from e
67 if not obj:
68 obj = FoodOrder(food_event=event, member=request.member)
69 obj.product = product
70 if obj.payment:
71 try:
72 delete_payment(obj, member=request.member, ignore_change_window=True)
73 except PaymentError:
74 messages.error(
75 request,
76 _("Your order could not be updated because it was already paid."),
77 )
78 return redirect("pizzas:index")
79 obj.save()
80 return redirect("pizzas:index")
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py
--- a/website/pizzas/api/v2/views.py
+++ b/website/pizzas/api/v2/views.py
@@ -102,22 +102,17 @@
def dispatch(self, request, *args, **kwargs):
self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get("pk"))
- return super().dispatch(request, *args, **kwargs)
+ try:
+ return super().dispatch(request, *args, **kwargs)
+ except PaymentError as e:
+ return Response(str(e), status=status.HTTP_403_FORBIDDEN,)
def update(self, request, *args, **kwargs):
super().update(request, *args, **kwargs)
instance = self.get_object()
if instance.payment:
- try:
- delete_payment(
- instance, member=request.member, ignore_change_window=True
- )
- except PaymentError:
- return Response(
- "Your order could not be updated because it was already paid.",
- status=status.HTTP_403_FORBIDDEN,
- )
+ delete_payment(instance, member=request.member, ignore_change_window=True)
return Response(
FoodOrderSerializer(instance, context=self.get_serializer_context()).data
diff --git a/website/pizzas/views.py b/website/pizzas/views.py
--- a/website/pizzas/views.py
+++ b/website/pizzas/views.py
@@ -35,8 +35,11 @@
if not order.can_be_changed:
messages.error(request, _("You can no longer cancel."))
elif order.member == request.member:
- order.delete()
- messages.success(request, _("Your order has been cancelled."))
+ try:
+ order.delete()
+ messages.success(request, _("Your order has been cancelled."))
+ except PaymentError as e:
+ messages.error(request, str(e))
except Http404:
messages.error(request, _("Your order could not be found."))
return redirect("pizzas:index")
|
{"golden_diff": "diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py\n--- a/website/pizzas/api/v2/views.py\n+++ b/website/pizzas/api/v2/views.py\n@@ -102,22 +102,17 @@\n \n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n- return super().dispatch(request, *args, **kwargs)\n+ try:\n+ return super().dispatch(request, *args, **kwargs)\n+ except PaymentError as e:\n+ return Response(str(e), status=status.HTTP_403_FORBIDDEN,)\n \n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n \n if instance.payment:\n- try:\n- delete_payment(\n- instance, member=request.member, ignore_change_window=True\n- )\n- except PaymentError:\n- return Response(\n- \"Your order could not be updated because it was already paid.\",\n- status=status.HTTP_403_FORBIDDEN,\n- )\n+ delete_payment(instance, member=request.member, ignore_change_window=True)\n \n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\ndiff --git a/website/pizzas/views.py b/website/pizzas/views.py\n--- a/website/pizzas/views.py\n+++ b/website/pizzas/views.py\n@@ -35,8 +35,11 @@\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n- order.delete()\n- messages.success(request, _(\"Your order has been cancelled.\"))\n+ try:\n+ order.delete()\n+ messages.success(request, _(\"Your order has been cancelled.\"))\n+ except PaymentError as e:\n+ messages.error(request, str(e))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n", "issue": "PaymentError: This payment has already been processed and hence cannot be deleted.\nSentry Issue: [CONCREXIT-8C](https://sentry.io/organizations/thalia/issues/2568657203/?referrer=github_integration)\n\n```\nPaymentError: This payment has already been processed and hence cannot be deleted.\n(1 additional frame(s) were not displayed)\n...\n File \"django/core/handlers/base.py\", line 181, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"django/views/decorators/http.py\", line 40, in inner\n return func(request, *args, **kwargs)\n File \"pizzas/views.py\", line 38, in cancel_order\n order.delete()\n File \"pizzas/models.py\", line 256, in delete\n delete_payment(self)\n File \"payments/services.py\", line 98, in delete_payment\n raise PaymentError(\n```\n", "before_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n CreateAPIView,\n DestroyAPIView,\n UpdateAPIView,\n)\n\nfrom rest_framework import filters as framework_filters, status\nfrom rest_framework.response import Response\n\nfrom payments.exceptions import PaymentError\nfrom payments.services import delete_payment\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n ProductSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n FoodOrderCreateSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, Product, FoodOrder\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = 
FoodEventSerializer\n queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": [\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n return super().dispatch(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n\n if instance.payment:\n try:\n delete_payment(\n instance, member=request.member, ignore_change_window=True\n )\n except PaymentError:\n return Response(\n \"Your order could not be updated because it was already paid.\",\n status=status.HTTP_403_FORBIDDEN,\n )\n\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n FoodOrderSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}, {"content": "\"\"\"Views provided by the pizzas package.\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom payments.exceptions import PaymentError\nfrom payments.services import delete_payment\nfrom .models import FoodOrder, FoodEvent, Product\n\n\n@login_required\ndef index(request):\n \"\"\"Overview of user 
order for a pizza event.\"\"\"\n products = Product.available_products.order_by(\"name\")\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n return render(request, \"pizzas/index.html\", context)\n\n\n@require_http_methods([\"POST\"])\ndef cancel_order(request):\n \"\"\"View that cancels a user's order.\"\"\"\n if \"order\" in request.POST:\n try:\n order = get_object_or_404(FoodOrder, pk=int(request.POST[\"order\"]))\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n order.delete()\n messages.success(request, _(\"Your order has been cancelled.\"))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n\n\n@login_required\ndef place_order(request):\n \"\"\"View that shows the detail of the current order.\"\"\"\n event = FoodEvent.current()\n if not event:\n return redirect(\"pizzas:index\")\n\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n current_order_locked = False\n\n if \"product\" in request.POST and not current_order_locked:\n productset = Product.available_products.all()\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n productset = productset.exclude(restricted=True)\n try:\n product = productset.get(pk=int(request.POST[\"product\"]))\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n if obj.payment:\n try:\n delete_payment(obj, member=request.member, ignore_change_window=True)\n except PaymentError:\n messages.error(\n request,\n _(\"Your order could not be updated because it was already paid.\"),\n )\n return redirect(\"pizzas:index\")\n obj.save()\n return redirect(\"pizzas:index\")\n", "path": "website/pizzas/views.py"}], "after_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n CreateAPIView,\n DestroyAPIView,\n UpdateAPIView,\n)\n\nfrom rest_framework import filters as framework_filters, status\nfrom rest_framework.response import Response\n\nfrom payments.exceptions import PaymentError\nfrom payments.services import delete_payment\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n ProductSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n FoodOrderCreateSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, Product, FoodOrder\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = 
[\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": [\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n try:\n return super().dispatch(request, *args, **kwargs)\n except PaymentError as e:\n return Response(str(e), status=status.HTTP_403_FORBIDDEN,)\n\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n\n if instance.payment:\n delete_payment(instance, member=request.member, ignore_change_window=True)\n\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n FoodOrderSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}, {"content": "\"\"\"Views provided by the pizzas package.\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom payments.exceptions import PaymentError\nfrom payments.services import delete_payment\nfrom .models import FoodOrder, FoodEvent, Product\n\n\n@login_required\ndef index(request):\n \"\"\"Overview of user order for a pizza event.\"\"\"\n products = Product.available_products.order_by(\"name\")\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n 
obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n return render(request, \"pizzas/index.html\", context)\n\n\n@require_http_methods([\"POST\"])\ndef cancel_order(request):\n \"\"\"View that cancels a user's order.\"\"\"\n if \"order\" in request.POST:\n try:\n order = get_object_or_404(FoodOrder, pk=int(request.POST[\"order\"]))\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n try:\n order.delete()\n messages.success(request, _(\"Your order has been cancelled.\"))\n except PaymentError as e:\n messages.error(request, str(e))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n\n\n@login_required\ndef place_order(request):\n \"\"\"View that shows the detail of the current order.\"\"\"\n event = FoodEvent.current()\n if not event:\n return redirect(\"pizzas:index\")\n\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n current_order_locked = False\n\n if \"product\" in request.POST and not current_order_locked:\n productset = Product.available_products.all()\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n productset = productset.exclude(restricted=True)\n try:\n product = productset.get(pk=int(request.POST[\"product\"]))\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n if obj.payment:\n try:\n delete_payment(obj, member=request.member, ignore_change_window=True)\n except PaymentError:\n messages.error(\n request,\n _(\"Your order could not be updated because it was already paid.\"),\n )\n return redirect(\"pizzas:index\")\n obj.save()\n return redirect(\"pizzas:index\")\n", "path": "website/pizzas/views.py"}]}
| 2,449 | 465 |
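The golden diff above resolves the Sentry error by catching `PaymentError` around `order.delete()` and turning it into a user-facing message instead of an unhandled 500. The snippet below is a simplified, framework-free sketch of that pattern; `PaymentError` and `FoodOrder` here are stand-ins rather than the concrexit Django models.

```python
# Sketch of the error-handling pattern applied in the golden diff above:
# a delete that may raise PaymentError is wrapped in try/except so the
# failure becomes a message for the user rather than a server error.

class PaymentError(Exception):
    """Raised when an already-processed payment blocks deletion."""

class FoodOrder:
    def __init__(self, paid: bool):
        self.paid = paid

    def delete(self) -> None:
        if self.paid:
            raise PaymentError(
                "This payment has already been processed and hence cannot be deleted."
            )

def cancel_order(order: FoodOrder) -> str:
    try:
        order.delete()
        return "Your order has been cancelled."
    except PaymentError as e:
        return str(e)  # surfaced to the user instead of crashing the view

if __name__ == "__main__":
    print(cancel_order(FoodOrder(paid=False)))  # cancellation succeeds
    print(cancel_order(FoodOrder(paid=True)))   # payment error is reported
```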
gh_patches_debug_39582
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-1523
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PORT] Move TelemetryClient property to DialogContainer
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/4178
Fixes #2638
Move TelemetryClient onto DialogContainer (implementation the same in AdaptiveDialog and ComponentDialog).
# Changed projects
* Microsoft.Bot.Builder.Dialogs.Adaptive
* Microsoft.Bot.Builder.Dialogs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 import inspect
4 from hashlib import sha256
5 from typing import Dict
6
7 from botbuilder.core import TurnContext, BotAssert, StatePropertyAccessor
8 from .dialog import Dialog
9 from .dialog_state import DialogState
10
11
12 class DialogSet:
13 def __init__(self, dialog_state: StatePropertyAccessor = None):
14 # pylint: disable=import-outside-toplevel
15 if dialog_state is None:
16 frame = inspect.currentframe().f_back
17 try:
18 # try to access the caller's "self"
19 try:
20 self_obj = frame.f_locals["self"]
21 except KeyError:
22 raise TypeError("DialogSet(): dialog_state cannot be None.")
23 # Only ComponentDialog can initialize with None dialog_state
24 from .component_dialog import ComponentDialog
25 from .dialog_manager import DialogManager
26 from .dialog_container import DialogContainer
27
28 if not isinstance(
29 self_obj, (ComponentDialog, DialogContainer, DialogManager)
30 ):
31 raise TypeError("DialogSet(): dialog_state cannot be None.")
32 finally:
33 # make sure to clean up the frame at the end to avoid ref cycles
34 del frame
35
36 self._dialog_state = dialog_state
37 # self.__telemetry_client = NullBotTelemetryClient.Instance;
38
39 self._dialogs: Dict[str, Dialog] = {}
40 self._version: str = None
41
42 def get_version(self) -> str:
43 """
44 Gets a unique string which represents the combined versions of all dialogs in this this dialogset.
45 <returns>Version will change when any of the child dialogs version changes.</returns>
46 """
47 if not self._version:
48 version = ""
49 for _, dialog in self._dialogs.items():
50 aux_version = dialog.get_version()
51 if aux_version:
52 version += aux_version
53
54 self._version = sha256(version)
55
56 return self._version
57
58 def add(self, dialog: Dialog):
59 """
60 Adds a new dialog to the set and returns the added dialog.
61 :param dialog: The dialog to add.
62 """
63 if dialog is None or not isinstance(dialog, Dialog):
64 raise TypeError(
65 "DialogSet.add(): dialog cannot be None and must be a Dialog or derived class."
66 )
67
68 if dialog.id in self._dialogs:
69 raise TypeError(
70 "DialogSet.add(): A dialog with an id of '%s' already added."
71 % dialog.id
72 )
73
74 # dialog.telemetry_client = this._telemetry_client;
75 self._dialogs[dialog.id] = dialog
76
77 return self
78
79 async def create_context(self, turn_context: TurnContext) -> "DialogContext":
80 # This import prevents circular dependency issues
81 # pylint: disable=import-outside-toplevel
82 from .dialog_context import DialogContext
83
84 # pylint: disable=unnecessary-lambda
85 BotAssert.context_not_none(turn_context)
86
87 if not self._dialog_state:
88 raise RuntimeError(
89 "DialogSet.CreateContextAsync(): DialogSet created with a null IStatePropertyAccessor."
90 )
91
92 state: DialogState = await self._dialog_state.get(
93 turn_context, lambda: DialogState()
94 )
95
96 return DialogContext(self, turn_context, state)
97
98 async def find(self, dialog_id: str) -> Dialog:
99 """
100 Finds a dialog that was previously added to the set using add(dialog)
101 :param dialog_id: ID of the dialog/prompt to look up.
102 :return: The dialog if found, otherwise null.
103 """
104 if not dialog_id:
105 raise TypeError("DialogContext.find(): dialog_id cannot be None.")
106
107 if dialog_id in self._dialogs:
108 return self._dialogs[dialog_id]
109
110 return None
111
112 def find_dialog(self, dialog_id: str) -> Dialog:
113 """
114 Finds a dialog that was previously added to the set using add(dialog)
115 :param dialog_id: ID of the dialog/prompt to look up.
116 :return: The dialog if found, otherwise null.
117 """
118 if not dialog_id:
119 raise TypeError("DialogContext.find(): dialog_id cannot be None.")
120
121 if dialog_id in self._dialogs:
122 return self._dialogs[dialog_id]
123
124 return None
125
126 def __str__(self):
127 if self._dialogs:
128 return "dialog set empty!"
129 return " ".join(map(str, self._dialogs.keys()))
130
```
Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from abc import ABC, abstractmethod
5
6
7 from .dialog import Dialog
8 from .dialog_context import DialogContext
9 from .dialog_event import DialogEvent
10 from .dialog_events import DialogEvents
11 from .dialog_set import DialogSet
12
13
14 class DialogContainer(Dialog, ABC):
15 def __init__(self, dialog_id: str = None):
16 super().__init__(dialog_id)
17
18 self.dialogs = DialogSet()
19
20 @abstractmethod
21 def create_child_context(self, dialog_context: DialogContext) -> DialogContext:
22 raise NotImplementedError()
23
24 def find_dialog(self, dialog_id: str) -> Dialog:
25 # TODO: deprecate DialogSet.find
26 return self.dialogs.find_dialog(dialog_id)
27
28 async def on_dialog_event(
29 self, dialog_context: DialogContext, dialog_event: DialogEvent
30 ) -> bool:
31 """
32 Called when an event has been raised, using `DialogContext.emitEvent()`, by either the current dialog or a
33 dialog that the current dialog started.
34 :param dialog_context: The dialog context for the current turn of conversation.
35 :param dialog_event: The event being raised.
36 :return: True if the event is handled by the current dialog and bubbling should stop.
37 """
38 handled = await super().on_dialog_event(dialog_context, dialog_event)
39
40 # Trace unhandled "versionChanged" events.
41 if not handled and dialog_event.name == DialogEvents.version_changed:
42
43 trace_message = (
44 f"Unhandled dialog event: {dialog_event.name}. Active Dialog: "
45 f"{dialog_context.active_dialog.id}"
46 )
47
48 await dialog_context.context.send_trace_activity(trace_message)
49
50 return handled
51
52 def get_internal_version(self) -> str:
53 """
54 GetInternalVersion - Returns internal version identifier for this container.
55 DialogContainers detect changes of all sub-components in the container and map that to an DialogChanged event.
56 Because they do this, DialogContainers "hide" the internal changes and just have the .id. This isolates changes
57 to the container level unless a container doesn't handle it. To support this DialogContainers define a
58 protected virtual method GetInternalVersion() which computes if this dialog or child dialogs have changed
59 which is then examined via calls to check_for_version_change_async().
60 :return: version which represents the change of the internals of this container.
61 """
62 return self.dialogs.get_version()
63
64 async def check_for_version_change_async(self, dialog_context: DialogContext):
65 """
66 :param dialog_context: dialog context.
67 :return: task.
68 Checks to see if a containers child dialogs have changed since the current dialog instance
69 was started.
70
71 This should be called at the start of `beginDialog()`, `continueDialog()`, and `resumeDialog()`.
72 """
73 current = dialog_context.active_dialog.version
74 dialog_context.active_dialog.version = self.get_internal_version()
75
76 # Check for change of previously stored hash
77 if current and current != dialog_context.active_dialog.version:
78 # Give bot an opportunity to handle the change.
79 # - If bot handles it the changeHash will have been updated as to avoid triggering the
80 # change again.
81 await dialog_context.emit_event(
82 DialogEvents.version_changed, self.id, True, False
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py
@@ -4,6 +4,7 @@
from abc import ABC, abstractmethod
+from botbuilder.core import NullTelemetryClient, BotTelemetryClient
from .dialog import Dialog
from .dialog_context import DialogContext
from .dialog_event import DialogEvent
@@ -17,6 +18,31 @@
self.dialogs = DialogSet()
+ @property
+ def telemetry_client(self) -> BotTelemetryClient:
+ """
+ Gets the telemetry client for logging events.
+ """
+ return self._telemetry_client
+
+ @telemetry_client.setter
+ def telemetry_client(self, value: BotTelemetryClient) -> None:
+ """
+ Sets the telemetry client for all dialogs in this set.
+ """
+ if value is None:
+ self._telemetry_client = NullTelemetryClient()
+ else:
+ self._telemetry_client = value
+
+ # Care! Dialogs.TelemetryClient assignment internally assigns the
+ # TelemetryClient for each dialog which could lead to an eventual stack
+ # overflow in cyclical dialog structures.
+ # Don't set the telemetry client if the candidate instance is the same as
+ # the currently set one.
+ if self.dialogs.telemetry_client != value:
+ self.dialogs.telemetry_client = self._telemetry_client
+
@abstractmethod
def create_child_context(self, dialog_context: DialogContext) -> DialogContext:
raise NotImplementedError()
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py
@@ -4,7 +4,13 @@
from hashlib import sha256
from typing import Dict
-from botbuilder.core import TurnContext, BotAssert, StatePropertyAccessor
+from botbuilder.core import (
+ NullTelemetryClient,
+ BotTelemetryClient,
+ TurnContext,
+ BotAssert,
+ StatePropertyAccessor,
+)
from .dialog import Dialog
from .dialog_state import DialogState
@@ -34,11 +40,31 @@
del frame
self._dialog_state = dialog_state
- # self.__telemetry_client = NullBotTelemetryClient.Instance;
+ self.__telemetry_client = NullTelemetryClient()
self._dialogs: Dict[str, Dialog] = {}
self._version: str = None
+ @property
+ def telemetry_client(self) -> BotTelemetryClient:
+ """
+ Gets the telemetry client for logging events.
+ """
+ return self.__telemetry_client
+
+ @telemetry_client.setter
+ def telemetry_client(self, value: BotTelemetryClient) -> None:
+ """
+ Sets the telemetry client for all dialogs in this set.
+ """
+ if value is None:
+ self.__telemetry_client = NullTelemetryClient()
+ else:
+ self.__telemetry_client = value
+
+ for dialog in self._dialogs.values():
+ dialog.telemetry_client = self.__telemetry_client
+
def get_version(self) -> str:
"""
Gets a unique string which represents the combined versions of all dialogs in this this dialogset.
|
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py\n@@ -4,6 +4,7 @@\n from abc import ABC, abstractmethod\n \n \n+from botbuilder.core import NullTelemetryClient, BotTelemetryClient\n from .dialog import Dialog\n from .dialog_context import DialogContext\n from .dialog_event import DialogEvent\n@@ -17,6 +18,31 @@\n \n self.dialogs = DialogSet()\n \n+ @property\n+ def telemetry_client(self) -> BotTelemetryClient:\n+ \"\"\"\n+ Gets the telemetry client for logging events.\n+ \"\"\"\n+ return self._telemetry_client\n+\n+ @telemetry_client.setter\n+ def telemetry_client(self, value: BotTelemetryClient) -> None:\n+ \"\"\"\n+ Sets the telemetry client for all dialogs in this set.\n+ \"\"\"\n+ if value is None:\n+ self._telemetry_client = NullTelemetryClient()\n+ else:\n+ self._telemetry_client = value\n+\n+ # Care! Dialogs.TelemetryClient assignment internally assigns the\n+ # TelemetryClient for each dialog which could lead to an eventual stack\n+ # overflow in cyclical dialog structures.\n+ # Don't set the telemetry client if the candidate instance is the same as\n+ # the currently set one.\n+ if self.dialogs.telemetry_client != value:\n+ self.dialogs.telemetry_client = self._telemetry_client\n+\n @abstractmethod\n def create_child_context(self, dialog_context: DialogContext) -> DialogContext:\n raise NotImplementedError()\ndiff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py\n@@ -4,7 +4,13 @@\n from hashlib import sha256\n from typing import Dict\n \n-from botbuilder.core import TurnContext, BotAssert, StatePropertyAccessor\n+from botbuilder.core import (\n+ NullTelemetryClient,\n+ BotTelemetryClient,\n+ TurnContext,\n+ BotAssert,\n+ StatePropertyAccessor,\n+)\n from .dialog import Dialog\n from .dialog_state import DialogState\n \n@@ -34,11 +40,31 @@\n del frame\n \n self._dialog_state = dialog_state\n- # self.__telemetry_client = NullBotTelemetryClient.Instance;\n+ self.__telemetry_client = NullTelemetryClient()\n \n self._dialogs: Dict[str, Dialog] = {}\n self._version: str = None\n \n+ @property\n+ def telemetry_client(self) -> BotTelemetryClient:\n+ \"\"\"\n+ Gets the telemetry client for logging events.\n+ \"\"\"\n+ return self.__telemetry_client\n+\n+ @telemetry_client.setter\n+ def telemetry_client(self, value: BotTelemetryClient) -> None:\n+ \"\"\"\n+ Sets the telemetry client for all dialogs in this set.\n+ \"\"\"\n+ if value is None:\n+ self.__telemetry_client = NullTelemetryClient()\n+ else:\n+ self.__telemetry_client = value\n+\n+ for dialog in self._dialogs.values():\n+ dialog.telemetry_client = self.__telemetry_client\n+\n def get_version(self) -> str:\n \"\"\"\n Gets a unique string which represents the combined versions of all dialogs in this this dialogset.\n", "issue": "[PORT] Move TelemetryClient property to DialogContainer\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/4178\n\nFixes #2638 \r\n\r\nMove TelemetryClient onto DialogContainer (implementation the same in AdaptiveDialog and ComponentDialog).\n\n\r\n# Changed 
projects\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive\r\n* Microsoft.Bot.Builder.Dialogs\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nimport inspect\nfrom hashlib import sha256\nfrom typing import Dict\n\nfrom botbuilder.core import TurnContext, BotAssert, StatePropertyAccessor\nfrom .dialog import Dialog\nfrom .dialog_state import DialogState\n\n\nclass DialogSet:\n def __init__(self, dialog_state: StatePropertyAccessor = None):\n # pylint: disable=import-outside-toplevel\n if dialog_state is None:\n frame = inspect.currentframe().f_back\n try:\n # try to access the caller's \"self\"\n try:\n self_obj = frame.f_locals[\"self\"]\n except KeyError:\n raise TypeError(\"DialogSet(): dialog_state cannot be None.\")\n # Only ComponentDialog can initialize with None dialog_state\n from .component_dialog import ComponentDialog\n from .dialog_manager import DialogManager\n from .dialog_container import DialogContainer\n\n if not isinstance(\n self_obj, (ComponentDialog, DialogContainer, DialogManager)\n ):\n raise TypeError(\"DialogSet(): dialog_state cannot be None.\")\n finally:\n # make sure to clean up the frame at the end to avoid ref cycles\n del frame\n\n self._dialog_state = dialog_state\n # self.__telemetry_client = NullBotTelemetryClient.Instance;\n\n self._dialogs: Dict[str, Dialog] = {}\n self._version: str = None\n\n def get_version(self) -> str:\n \"\"\"\n Gets a unique string which represents the combined versions of all dialogs in this this dialogset.\n <returns>Version will change when any of the child dialogs version changes.</returns>\n \"\"\"\n if not self._version:\n version = \"\"\n for _, dialog in self._dialogs.items():\n aux_version = dialog.get_version()\n if aux_version:\n version += aux_version\n\n self._version = sha256(version)\n\n return self._version\n\n def add(self, dialog: Dialog):\n \"\"\"\n Adds a new dialog to the set and returns the added dialog.\n :param dialog: The dialog to add.\n \"\"\"\n if dialog is None or not isinstance(dialog, Dialog):\n raise TypeError(\n \"DialogSet.add(): dialog cannot be None and must be a Dialog or derived class.\"\n )\n\n if dialog.id in self._dialogs:\n raise TypeError(\n \"DialogSet.add(): A dialog with an id of '%s' already added.\"\n % dialog.id\n )\n\n # dialog.telemetry_client = this._telemetry_client;\n self._dialogs[dialog.id] = dialog\n\n return self\n\n async def create_context(self, turn_context: TurnContext) -> \"DialogContext\":\n # This import prevents circular dependency issues\n # pylint: disable=import-outside-toplevel\n from .dialog_context import DialogContext\n\n # pylint: disable=unnecessary-lambda\n BotAssert.context_not_none(turn_context)\n\n if not self._dialog_state:\n raise RuntimeError(\n \"DialogSet.CreateContextAsync(): DialogSet created with a null IStatePropertyAccessor.\"\n )\n\n state: DialogState = await self._dialog_state.get(\n turn_context, lambda: DialogState()\n )\n\n return DialogContext(self, turn_context, state)\n\n async def find(self, dialog_id: str) -> Dialog:\n \"\"\"\n Finds a dialog that was previously added to the set using add(dialog)\n :param dialog_id: ID of the dialog/prompt to look up.\n :return: The dialog if found, otherwise null.\n \"\"\"\n if not dialog_id:\n raise TypeError(\"DialogContext.find(): dialog_id cannot be None.\")\n\n if dialog_id in self._dialogs:\n return self._dialogs[dialog_id]\n\n return None\n\n def find_dialog(self, dialog_id: str) -> Dialog:\n \"\"\"\n 
Finds a dialog that was previously added to the set using add(dialog)\n :param dialog_id: ID of the dialog/prompt to look up.\n :return: The dialog if found, otherwise null.\n \"\"\"\n if not dialog_id:\n raise TypeError(\"DialogContext.find(): dialog_id cannot be None.\")\n\n if dialog_id in self._dialogs:\n return self._dialogs[dialog_id]\n\n return None\n\n def __str__(self):\n if self._dialogs:\n return \"dialog set empty!\"\n return \" \".join(map(str, self._dialogs.keys()))\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom abc import ABC, abstractmethod\n\n\nfrom .dialog import Dialog\nfrom .dialog_context import DialogContext\nfrom .dialog_event import DialogEvent\nfrom .dialog_events import DialogEvents\nfrom .dialog_set import DialogSet\n\n\nclass DialogContainer(Dialog, ABC):\n def __init__(self, dialog_id: str = None):\n super().__init__(dialog_id)\n\n self.dialogs = DialogSet()\n\n @abstractmethod\n def create_child_context(self, dialog_context: DialogContext) -> DialogContext:\n raise NotImplementedError()\n\n def find_dialog(self, dialog_id: str) -> Dialog:\n # TODO: deprecate DialogSet.find\n return self.dialogs.find_dialog(dialog_id)\n\n async def on_dialog_event(\n self, dialog_context: DialogContext, dialog_event: DialogEvent\n ) -> bool:\n \"\"\"\n Called when an event has been raised, using `DialogContext.emitEvent()`, by either the current dialog or a\n dialog that the current dialog started.\n :param dialog_context: The dialog context for the current turn of conversation.\n :param dialog_event: The event being raised.\n :return: True if the event is handled by the current dialog and bubbling should stop.\n \"\"\"\n handled = await super().on_dialog_event(dialog_context, dialog_event)\n\n # Trace unhandled \"versionChanged\" events.\n if not handled and dialog_event.name == DialogEvents.version_changed:\n\n trace_message = (\n f\"Unhandled dialog event: {dialog_event.name}. Active Dialog: \"\n f\"{dialog_context.active_dialog.id}\"\n )\n\n await dialog_context.context.send_trace_activity(trace_message)\n\n return handled\n\n def get_internal_version(self) -> str:\n \"\"\"\n GetInternalVersion - Returns internal version identifier for this container.\n DialogContainers detect changes of all sub-components in the container and map that to an DialogChanged event.\n Because they do this, DialogContainers \"hide\" the internal changes and just have the .id. This isolates changes\n to the container level unless a container doesn't handle it. 
To support this DialogContainers define a\n protected virtual method GetInternalVersion() which computes if this dialog or child dialogs have changed\n which is then examined via calls to check_for_version_change_async().\n :return: version which represents the change of the internals of this container.\n \"\"\"\n return self.dialogs.get_version()\n\n async def check_for_version_change_async(self, dialog_context: DialogContext):\n \"\"\"\n :param dialog_context: dialog context.\n :return: task.\n Checks to see if a containers child dialogs have changed since the current dialog instance\n was started.\n\n This should be called at the start of `beginDialog()`, `continueDialog()`, and `resumeDialog()`.\n \"\"\"\n current = dialog_context.active_dialog.version\n dialog_context.active_dialog.version = self.get_internal_version()\n\n # Check for change of previously stored hash\n if current and current != dialog_context.active_dialog.version:\n # Give bot an opportunity to handle the change.\n # - If bot handles it the changeHash will have been updated as to avoid triggering the\n # change again.\n await dialog_context.emit_event(\n DialogEvents.version_changed, self.id, True, False\n )\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nimport inspect\nfrom hashlib import sha256\nfrom typing import Dict\n\nfrom botbuilder.core import (\n NullTelemetryClient,\n BotTelemetryClient,\n TurnContext,\n BotAssert,\n StatePropertyAccessor,\n)\nfrom .dialog import Dialog\nfrom .dialog_state import DialogState\n\n\nclass DialogSet:\n def __init__(self, dialog_state: StatePropertyAccessor = None):\n # pylint: disable=import-outside-toplevel\n if dialog_state is None:\n frame = inspect.currentframe().f_back\n try:\n # try to access the caller's \"self\"\n try:\n self_obj = frame.f_locals[\"self\"]\n except KeyError:\n raise TypeError(\"DialogSet(): dialog_state cannot be None.\")\n # Only ComponentDialog can initialize with None dialog_state\n from .component_dialog import ComponentDialog\n from .dialog_manager import DialogManager\n from .dialog_container import DialogContainer\n\n if not isinstance(\n self_obj, (ComponentDialog, DialogContainer, DialogManager)\n ):\n raise TypeError(\"DialogSet(): dialog_state cannot be None.\")\n finally:\n # make sure to clean up the frame at the end to avoid ref cycles\n del frame\n\n self._dialog_state = dialog_state\n self.__telemetry_client = NullTelemetryClient()\n\n self._dialogs: Dict[str, Dialog] = {}\n self._version: str = None\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"\n Gets the telemetry client for logging events.\n \"\"\"\n return self.__telemetry_client\n\n @telemetry_client.setter\n def telemetry_client(self, value: BotTelemetryClient) -> None:\n \"\"\"\n Sets the telemetry client for all dialogs in this set.\n \"\"\"\n if value is None:\n self.__telemetry_client = NullTelemetryClient()\n else:\n self.__telemetry_client = value\n\n for dialog in self._dialogs.values():\n dialog.telemetry_client = self.__telemetry_client\n\n def get_version(self) -> str:\n \"\"\"\n Gets a unique string which represents the combined versions of all dialogs in this this dialogset.\n <returns>Version will change when any of the child dialogs version changes.</returns>\n \"\"\"\n if not self._version:\n version = \"\"\n for _, dialog in self._dialogs.items():\n aux_version = 
dialog.get_version()\n if aux_version:\n version += aux_version\n\n self._version = sha256(version)\n\n return self._version\n\n def add(self, dialog: Dialog):\n \"\"\"\n Adds a new dialog to the set and returns the added dialog.\n :param dialog: The dialog to add.\n \"\"\"\n if dialog is None or not isinstance(dialog, Dialog):\n raise TypeError(\n \"DialogSet.add(): dialog cannot be None and must be a Dialog or derived class.\"\n )\n\n if dialog.id in self._dialogs:\n raise TypeError(\n \"DialogSet.add(): A dialog with an id of '%s' already added.\"\n % dialog.id\n )\n\n # dialog.telemetry_client = this._telemetry_client;\n self._dialogs[dialog.id] = dialog\n\n return self\n\n async def create_context(self, turn_context: TurnContext) -> \"DialogContext\":\n # This import prevents circular dependency issues\n # pylint: disable=import-outside-toplevel\n from .dialog_context import DialogContext\n\n # pylint: disable=unnecessary-lambda\n BotAssert.context_not_none(turn_context)\n\n if not self._dialog_state:\n raise RuntimeError(\n \"DialogSet.CreateContextAsync(): DialogSet created with a null IStatePropertyAccessor.\"\n )\n\n state: DialogState = await self._dialog_state.get(\n turn_context, lambda: DialogState()\n )\n\n return DialogContext(self, turn_context, state)\n\n async def find(self, dialog_id: str) -> Dialog:\n \"\"\"\n Finds a dialog that was previously added to the set using add(dialog)\n :param dialog_id: ID of the dialog/prompt to look up.\n :return: The dialog if found, otherwise null.\n \"\"\"\n if not dialog_id:\n raise TypeError(\"DialogContext.find(): dialog_id cannot be None.\")\n\n if dialog_id in self._dialogs:\n return self._dialogs[dialog_id]\n\n return None\n\n def find_dialog(self, dialog_id: str) -> Dialog:\n \"\"\"\n Finds a dialog that was previously added to the set using add(dialog)\n :param dialog_id: ID of the dialog/prompt to look up.\n :return: The dialog if found, otherwise null.\n \"\"\"\n if not dialog_id:\n raise TypeError(\"DialogContext.find(): dialog_id cannot be None.\")\n\n if dialog_id in self._dialogs:\n return self._dialogs[dialog_id]\n\n return None\n\n def __str__(self):\n if self._dialogs:\n return \"dialog set empty!\"\n return \" \".join(map(str, self._dialogs.keys()))\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom abc import ABC, abstractmethod\n\n\nfrom botbuilder.core import NullTelemetryClient, BotTelemetryClient\nfrom .dialog import Dialog\nfrom .dialog_context import DialogContext\nfrom .dialog_event import DialogEvent\nfrom .dialog_events import DialogEvents\nfrom .dialog_set import DialogSet\n\n\nclass DialogContainer(Dialog, ABC):\n def __init__(self, dialog_id: str = None):\n super().__init__(dialog_id)\n\n self.dialogs = DialogSet()\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"\n Gets the telemetry client for logging events.\n \"\"\"\n return self._telemetry_client\n\n @telemetry_client.setter\n def telemetry_client(self, value: BotTelemetryClient) -> None:\n \"\"\"\n Sets the telemetry client for all dialogs in this set.\n \"\"\"\n if value is None:\n self._telemetry_client = NullTelemetryClient()\n else:\n self._telemetry_client = value\n\n # Care! 
Dialogs.TelemetryClient assignment internally assigns the\n # TelemetryClient for each dialog which could lead to an eventual stack\n # overflow in cyclical dialog structures.\n # Don't set the telemetry client if the candidate instance is the same as\n # the currently set one.\n if self.dialogs.telemetry_client != value:\n self.dialogs.telemetry_client = self._telemetry_client\n\n @abstractmethod\n def create_child_context(self, dialog_context: DialogContext) -> DialogContext:\n raise NotImplementedError()\n\n def find_dialog(self, dialog_id: str) -> Dialog:\n # TODO: deprecate DialogSet.find\n return self.dialogs.find_dialog(dialog_id)\n\n async def on_dialog_event(\n self, dialog_context: DialogContext, dialog_event: DialogEvent\n ) -> bool:\n \"\"\"\n Called when an event has been raised, using `DialogContext.emitEvent()`, by either the current dialog or a\n dialog that the current dialog started.\n :param dialog_context: The dialog context for the current turn of conversation.\n :param dialog_event: The event being raised.\n :return: True if the event is handled by the current dialog and bubbling should stop.\n \"\"\"\n handled = await super().on_dialog_event(dialog_context, dialog_event)\n\n # Trace unhandled \"versionChanged\" events.\n if not handled and dialog_event.name == DialogEvents.version_changed:\n\n trace_message = (\n f\"Unhandled dialog event: {dialog_event.name}. Active Dialog: \"\n f\"{dialog_context.active_dialog.id}\"\n )\n\n await dialog_context.context.send_trace_activity(trace_message)\n\n return handled\n\n def get_internal_version(self) -> str:\n \"\"\"\n GetInternalVersion - Returns internal version identifier for this container.\n DialogContainers detect changes of all sub-components in the container and map that to an DialogChanged event.\n Because they do this, DialogContainers \"hide\" the internal changes and just have the .id. This isolates changes\n to the container level unless a container doesn't handle it. To support this DialogContainers define a\n protected virtual method GetInternalVersion() which computes if this dialog or child dialogs have changed\n which is then examined via calls to check_for_version_change_async().\n :return: version which represents the change of the internals of this container.\n \"\"\"\n return self.dialogs.get_version()\n\n async def check_for_version_change_async(self, dialog_context: DialogContext):\n \"\"\"\n :param dialog_context: dialog context.\n :return: task.\n Checks to see if a containers child dialogs have changed since the current dialog instance\n was started.\n\n This should be called at the start of `beginDialog()`, `continueDialog()`, and `resumeDialog()`.\n \"\"\"\n current = dialog_context.active_dialog.version\n dialog_context.active_dialog.version = self.get_internal_version()\n\n # Check for change of previously stored hash\n if current and current != dialog_context.active_dialog.version:\n # Give bot an opportunity to handle the change.\n # - If bot handles it the changeHash will have been updated as to avoid triggering the\n # change again.\n await dialog_context.emit_event(\n DialogEvents.version_changed, self.id, True, False\n )\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py"}]}
| 2,485 | 816 |
gh_patches_debug_1325 | rasdani/github-patches | git_diff | pypa__setuptools-3307 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs] Clarify that "Keywords" page is an API reference for `setuptools.setup`
### Summary
https://setuptools.readthedocs.io/en/latest/references/keywords.html has no indication of what those keywords are for.
It also doesn't show up if you search for "setuptools.setup" in the sidebar search, and is generally inaccessible via search engines as well. It also has poor formatting, likely due to mismanaged indentation on the page.
### OS / Environment
_No response_
### Additional Information
It would be good to:
- have an introductory section in the page, describing what it is for -- explicitly mention "setuptools.setup" in this as well.
- provide anchors for each of the keyword arguments.
- fix the indentation for the list of various arguments.
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']
2
3 master_doc = "index"
4
5 link_files = {
6 '../CHANGES.rst': dict(
7 using=dict(
8 BB='https://bitbucket.org',
9 GH='https://github.com',
10 ),
11 replace=[
12 dict(
13 pattern=r'(?<!\w)PR #(?P<pull>\d+)',
14 url='{package_url}/pull/{pull}',
15 ),
16 dict(
17 pattern=r'(?<!\w)(Issue )?#(?P<issue>\d+)',
18 url='{package_url}/issues/{issue}',
19 ),
20 dict(
21 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
22 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
23 ),
24 dict(
25 pattern=r'Distribute #(?P<distribute>\d+)',
26 url='{BB}/tarek/distribute/issue/{distribute}',
27 ),
28 dict(
29 pattern=r'Buildout #(?P<buildout>\d+)',
30 url='{GH}/buildout/buildout/issues/{buildout}',
31 ),
32 dict(
33 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
34 url='http://bugs.python.org/setuptools/issue{old_setuptools}',
35 ),
36 dict(
37 pattern=r'Jython #(?P<jython>\d+)',
38 url='http://bugs.jython.org/issue{jython}',
39 ),
40 dict(
41 pattern=r'(Python #|bpo-)(?P<python>\d+)',
42 url='http://bugs.python.org/issue{python}',
43 ),
44 dict(
45 pattern=r'Interop #(?P<interop>\d+)',
46 url='{GH}/pypa/interoperability-peps/issues/{interop}',
47 ),
48 dict(
49 pattern=r'Pip #(?P<pip>\d+)',
50 url='{GH}/pypa/pip/issues/{pip}',
51 ),
52 dict(
53 pattern=r'Packaging #(?P<packaging>\d+)',
54 url='{GH}/pypa/packaging/issues/{packaging}',
55 ),
56 dict(
57 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
58 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
59 ),
60 dict(
61 pattern=r'(?<![`/\w])PEP[- ](?P<pep_number>\d+)',
62 url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
63 ),
64 dict(
65 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
66 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
67 ),
68 dict(
69 pattern=r'pypa/(?P<issue_repo>[\-\.\w]+)#(?P<issue_number>\d+)',
70 url='{GH}/pypa/{issue_repo}/issues/{issue_number}',
71 ),
72 dict(
73 pattern=r'pypa/(?P<commit_repo>[\-\.\w]+)@(?P<commit_number>[\da-f]+)',
74 url='{GH}/pypa/{commit_repo}/commit/{commit_number}',
75 ),
76 dict(
77 pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
78 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
79 ),
80 ],
81 ),
82 }
83
84 # Be strict about any broken references:
85 nitpicky = True
86
87 # Include Python intersphinx mapping to prevent failures
88 # jaraco/skeleton#51
89 extensions += ['sphinx.ext.intersphinx']
90 intersphinx_mapping = {
91 'python': ('https://docs.python.org/3', None),
92 }
93
94 intersphinx_mapping.update({
95 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)
96 })
97
98 # Add support for linking usernames
99 github_url = 'https://github.com'
100 github_repo_org = 'pypa'
101 github_repo_name = 'setuptools'
102 github_repo_slug = f'{github_repo_org}/{github_repo_name}'
103 github_repo_url = f'{github_url}/{github_repo_slug}'
104 github_sponsors_url = f'{github_url}/sponsors'
105 extlinks = {
106 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
107 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323
108 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323
109 }
110 extensions += ['sphinx.ext.extlinks']
111
112 # Ref: https://github.com/python-attrs/attrs/pull/571/files\
113 # #diff-85987f48f1258d9ee486e3191495582dR82
114 default_role = 'any'
115
116 # HTML theme
117 html_theme = 'furo'
118 html_logo = "images/logo.svg"
119
120 html_theme_options = {
121 "sidebar_hide_name": True,
122 "light_css_variables": {
123 "color-brand-primary": "#336790", # "blue"
124 "color-brand-content": "#336790",
125 },
126 "dark_css_variables": {
127 "color-brand-primary": "#E5B62F", # "yellow"
128 "color-brand-content": "#E5B62F",
129 },
130 }
131
132 # Add support for inline tabs
133 extensions += ['sphinx_inline_tabs']
134
135 # Support for distutils
136
137 # Ref: https://stackoverflow.com/a/30624034/595220
138 nitpick_ignore = [
139 ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs
140 ('envvar', 'DISTUTILS_DEBUG'), # undocumented
141 ('envvar', 'HOME'), # undocumented
142 ('envvar', 'PLAT'), # undocumented
143 ('py:attr', 'CCompiler.language_map'), # undocumented
144 ('py:attr', 'CCompiler.language_order'), # undocumented
145 ('py:class', 'distutils.dist.Distribution'), # undocumented
146 ('py:class', 'distutils.extension.Extension'), # undocumented
147 ('py:class', 'BorlandCCompiler'), # undocumented
148 ('py:class', 'CCompiler'), # undocumented
149 ('py:class', 'CygwinCCompiler'), # undocumented
150 ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented
151 ('py:class', 'FileList'), # undocumented
152 ('py:class', 'IShellLink'), # ref to MS docs
153 ('py:class', 'MSVCCompiler'), # undocumented
154 ('py:class', 'OptionDummy'), # undocumented
155 ('py:class', 'UnixCCompiler'), # undocumented
156 ('py:exc', 'CompileError'), # undocumented
157 ('py:exc', 'DistutilsExecError'), # undocumented
158 ('py:exc', 'DistutilsFileError'), # undocumented
159 ('py:exc', 'LibError'), # undocumented
160 ('py:exc', 'LinkError'), # undocumented
161 ('py:exc', 'PreprocessError'), # undocumented
162 ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented
163 # undocumented:
164 ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),
165 ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented
166 ('py:func', 'distutils.log.debug'), # undocumented
167 ('py:func', 'distutils.spawn.find_executable'), # undocumented
168 ('py:func', 'distutils.spawn.spawn'), # undocumented
169 # TODO: check https://docutils.rtfd.io in the future
170 ('py:mod', 'docutils'), # there's no Sphinx site documenting this
171 ]
172
173 # Allow linking objects on other Sphinx sites seamlessly:
174 intersphinx_mapping.update(
175 python2=('https://docs.python.org/2', None),
176 python=('https://docs.python.org/3', None),
177 )
178
179 # Add support for the unreleased "next-version" change notes
180 extensions += ['sphinxcontrib.towncrier']
181 # Extension needs a path from here to the towncrier config.
182 towncrier_draft_working_directory = '..'
183 # Avoid an empty section for unpublished changes.
184 towncrier_draft_include_empty = False
185
186 extensions += ['jaraco.tidelift']
187
188 # Add icons (aka "favicons") to documentation
189 extensions += ['sphinx-favicon']
190 html_static_path = ['images'] # should contain the folder with icons
191
192 # List of dicts with <link> HTML attributes
193 # static-file points to files in the html_static_path (href is computed)
194 favicons = [
195 { # "Catch-all" goes first, otherwise some browsers will overwrite
196 "rel": "icon",
197 "type": "image/svg+xml",
198 "static-file": "logo-symbol-only.svg",
199 "sizes": "any"
200 },
201 { # Version with thicker strokes for better visibility at smaller sizes
202 "rel": "icon",
203 "type": "image/svg+xml",
204 "static-file": "favicon.svg",
205 "sizes": "16x16 24x24 32x32 48x48"
206 },
207 # rel="apple-touch-icon" does not support SVG yet
208 ]
209
210 intersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None
211 intersphinx_mapping['PyPUG'] = ('https://packaging.python.org/en/latest/', None)
212 intersphinx_mapping['packaging'] = ('https://packaging.pypa.io/en/latest/', None)
213 intersphinx_mapping['importlib-resources'] = (
214 'https://importlib-resources.readthedocs.io/en/latest', None
215 )
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -129,6 +129,12 @@
},
}
+# Redirect old docs so links and references in the ecosystem don't break
+extensions += ['sphinx_reredirects']
+redirects = {
+ "userguide/keywords": "/deprecated/changed_keywords.html",
+}
+
# Add support for inline tabs
extensions += ['sphinx_inline_tabs']
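
A note on the mechanism the patch relies on: `sphinx_reredirects` reads the `redirects` mapping and writes small HTML stub pages that forward the old document name to the new URL. The snippet below is only a hypothetical stand-in for that behaviour (the real extension also handles templates and source suffixes); the `write_redirect_stubs` helper name and the `_build/html` output path are assumptions, while the mapping itself is copied from the diff above.

```python
from pathlib import Path

# Hypothetical sketch: turn a redirects mapping into meta-refresh stub pages,
# roughly what sphinx-reredirects produces during the HTML build.
def write_redirect_stubs(redirects: dict, outdir: Path) -> None:
    for old_docname, new_url in redirects.items():
        stub = outdir / f"{old_docname}.html"  # e.g. _build/html/userguide/keywords.html
        stub.parent.mkdir(parents=True, exist_ok=True)
        stub.write_text(
            f'<!DOCTYPE html><meta http-equiv="refresh" content="0; url={new_url}">'
        )

write_redirect_stubs(
    {"userguide/keywords": "/deprecated/changed_keywords.html"},
    Path("_build/html"),
)
```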
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -129,6 +129,12 @@\n },\n }\n \n+# Redirect old docs so links and references in the ecosystem don't break\n+extensions += ['sphinx_reredirects']\n+redirects = {\n+ \"userguide/keywords\": \"/deprecated/changed_keywords.html\",\n+}\n+\n # Add support for inline tabs\n extensions += ['sphinx_inline_tabs']\n", "issue": "[Docs] Clarify that \"Keywords\" page is an API reference for `setuptools.setup`\n### Summary\n\nhttps://setuptools.readthedocs.io/en/latest/references/keywords.html has no indicators what those keywords are for.\r\n\r\nIt also doesn't show up if you search for \"setuptools.setup\" in the sidebar search, and is generally innaccessible via search engines as well. It also has poor formatting, likely due to mismanaged indentation on the page.\n\n### OS / Environment\n\n_No response_\n\n### Additional Information\n\nIt would be good to:\r\n\r\n- have an introductory section in the page, describing what it is for -- explicitly mention \"setuptools.setup\" in this as well.\r\n- provide anchors for each of the keyword arguments.\r\n- fix the indentation for the list of various arguments.\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(?<!\\w)PR #(?P<pull>\\d+)',\n url='{package_url}/pull/{pull}',\n ),\n dict(\n pattern=r'(?<!\\w)(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'(?<![`/\\w])PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/(?P<issue_repo>[\\-\\.\\w]+)#(?P<issue_number>\\d+)',\n url='{GH}/pypa/{issue_repo}/issues/{issue_number}',\n ),\n dict(\n pattern=r'pypa/(?P<commit_repo>[\\-\\.\\w]+)@(?P<commit_number>[\\da-f]+)',\n url='{GH}/pypa/{commit_repo}/commit/{commit_number}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n 
),\n}\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Include Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\nintersphinx_mapping.update({\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n})\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_repo_org = 'pypa'\ngithub_repo_name = 'setuptools'\ngithub_repo_slug = f'{github_repo_org}/{github_repo_name}'\ngithub_repo_url = f'{github_url}/{github_repo_slug}'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323\n 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python2=('https://docs.python.org/2', None),\n python=('https://docs.python.org/3', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += 
['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nextensions += ['sphinx-favicon']\nhtml_static_path = ['images'] # should contain the folder with icons\n\n# List of dicts with <link> HTML attributes\n# static-file points to files in the html_static_path (href is computed)\nfavicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"logo-symbol-only.svg\",\n \"sizes\": \"any\"\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\"\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n\nintersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None\nintersphinx_mapping['PyPUG'] = ('https://packaging.python.org/en/latest/', None)\nintersphinx_mapping['packaging'] = ('https://packaging.pypa.io/en/latest/', None)\nintersphinx_mapping['importlib-resources'] = (\n 'https://importlib-resources.readthedocs.io/en/latest', None\n)\n", "path": "docs/conf.py"}], "after_files": [{"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(?<!\\w)PR #(?P<pull>\\d+)',\n url='{package_url}/pull/{pull}',\n ),\n dict(\n pattern=r'(?<!\\w)(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'(?<![`/\\w])PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/(?P<issue_repo>[\\-\\.\\w]+)#(?P<issue_number>\\d+)',\n url='{GH}/pypa/{issue_repo}/issues/{issue_number}',\n ),\n dict(\n pattern=r'pypa/(?P<commit_repo>[\\-\\.\\w]+)@(?P<commit_number>[\\da-f]+)',\n url='{GH}/pypa/{commit_repo}/commit/{commit_number}',\n ),\n dict(\n 
pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Include Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\nintersphinx_mapping.update({\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n})\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_repo_org = 'pypa'\ngithub_repo_name = 'setuptools'\ngithub_repo_slug = f'{github_repo_org}/{github_repo_name}'\ngithub_repo_url = f'{github_url}/{github_repo_slug}'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323\n 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Redirect old docs so links and references in the ecosystem don't break\nextensions += ['sphinx_reredirects']\nredirects = {\n \"userguide/keywords\": \"/deprecated/changed_keywords.html\",\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # 
there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python2=('https://docs.python.org/2', None),\n python=('https://docs.python.org/3', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += ['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nextensions += ['sphinx-favicon']\nhtml_static_path = ['images'] # should contain the folder with icons\n\n# List of dicts with <link> HTML attributes\n# static-file points to files in the html_static_path (href is computed)\nfavicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"logo-symbol-only.svg\",\n \"sizes\": \"any\"\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\"\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n\nintersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None\nintersphinx_mapping['PyPUG'] = ('https://packaging.python.org/en/latest/', None)\nintersphinx_mapping['packaging'] = ('https://packaging.pypa.io/en/latest/', None)\nintersphinx_mapping['importlib-resources'] = (\n 'https://importlib-resources.readthedocs.io/en/latest', None\n)\n", "path": "docs/conf.py"}]}
| 3,181 | 107 |
gh_patches_debug_12233 | rasdani/github-patches | git_diff | googleapis__python-bigquery-833 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expand pyarrow to support 5.x releases
Changelog: https://raw.githubusercontent.com/apache/arrow/4591d76fce2846a29dac33bf01e9ba0337b118e9/CHANGELOG.md
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "grpcio >= 1.38.1, < 2.0dev", # https://github.com/googleapis/python-bigquery/issues/695
33 # NOTE: Maintainers, please do not require google-api-core>=2.x.x
34 # Until this issue is closed
35 # https://github.com/googleapis/google-cloud-python/issues/10566
36 "google-api-core[grpc] >= 1.29.0, <3.0.0dev",
37 "proto-plus >= 1.10.0",
38 # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x
39 # Until this issue is closed
40 # https://github.com/googleapis/google-cloud-python/issues/10566
41 "google-cloud-core >= 1.4.1, <3.0.0dev",
42 "google-resumable-media >= 0.6.0, < 3.0dev",
43 "packaging >= 14.3",
44 "protobuf >= 3.12.0",
45 "requests >= 2.18.0, < 3.0.0dev",
46 ]
47 extras = {
48 "bqstorage": [
49 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
50 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
51 # installed, even though `google-cloud-bigquery-storage` specifies it
52 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
53 # See: https://github.com/googleapis/python-bigquery/issues/83 The
54 # grpc.Channel.close() method isn't added until 1.32.0.
55 # https://github.com/grpc/grpc/pull/15254
56 "grpcio >= 1.38.1, < 2.0dev",
57 "pyarrow >= 1.0.0, < 5.0dev",
58 ],
59 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 5.0dev"],
60 "bignumeric_type": ["pyarrow >= 3.0.0, < 5.0dev"],
61 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
62 "opentelemetry": [
63 "opentelemetry-api >= 0.11b0",
64 "opentelemetry-sdk >= 0.11b0",
65 "opentelemetry-instrumentation >= 0.11b0",
66 ],
67 }
68
69 all_extras = []
70
71 for extra in extras:
72 # Exclude this extra from all to avoid overly strict dependencies on core
73 # libraries such as pyarrow.
74 # https://github.com/googleapis/python-bigquery/issues/563
75 if extra in {"bignumeric_type"}:
76 continue
77 all_extras.extend(extras[extra])
78
79 extras["all"] = all_extras
80
81 # Setup boilerplate below this line.
82
83 package_root = os.path.abspath(os.path.dirname(__file__))
84
85 readme_filename = os.path.join(package_root, "README.rst")
86 with io.open(readme_filename, encoding="utf-8") as readme_file:
87 readme = readme_file.read()
88
89 version = {}
90 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
91 exec(fp.read(), version)
92 version = version["__version__"]
93
94 # Only include packages under the 'google' namespace. Do not include tests,
95 # benchmarks, etc.
96 packages = [
97 package
98 for package in setuptools.PEP420PackageFinder.find()
99 if package.startswith("google")
100 ]
101
102 # Determine which namespaces are needed.
103 namespaces = ["google"]
104 if "google.cloud" in packages:
105 namespaces.append("google.cloud")
106
107
108 setuptools.setup(
109 name=name,
110 version=version,
111 description=description,
112 long_description=readme,
113 author="Google LLC",
114 author_email="[email protected]",
115 license="Apache 2.0",
116 url="https://github.com/googleapis/python-bigquery",
117 classifiers=[
118 release_status,
119 "Intended Audience :: Developers",
120 "License :: OSI Approved :: Apache Software License",
121 "Programming Language :: Python",
122 "Programming Language :: Python :: 3",
123 "Programming Language :: Python :: 3.6",
124 "Programming Language :: Python :: 3.7",
125 "Programming Language :: Python :: 3.8",
126 "Programming Language :: Python :: 3.9",
127 "Operating System :: OS Independent",
128 "Topic :: Internet",
129 ],
130 platforms="Posix; MacOS X; Windows",
131 packages=packages,
132 namespace_packages=namespaces,
133 install_requires=dependencies,
134 extras_require=extras,
135 python_requires=">=3.6, <3.10",
136 include_package_data=True,
137 zip_safe=False,
138 )
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,10 +54,10 @@
# grpc.Channel.close() method isn't added until 1.32.0.
# https://github.com/grpc/grpc/pull/15254
"grpcio >= 1.38.1, < 2.0dev",
- "pyarrow >= 1.0.0, < 5.0dev",
+ "pyarrow >= 1.0.0, < 6.0dev",
],
- "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 5.0dev"],
- "bignumeric_type": ["pyarrow >= 3.0.0, < 5.0dev"],
+ "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 6.0dev"],
+ "bignumeric_type": ["pyarrow >= 3.0.0, < 6.0dev"],
"tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
"opentelemetry": [
"opentelemetry-api >= 0.11b0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,10 +54,10 @@\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.38.1, < 2.0dev\",\n- \"pyarrow >= 1.0.0, < 5.0dev\",\n+ \"pyarrow >= 1.0.0, < 6.0dev\",\n ],\n- \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 5.0dev\"],\n- \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 5.0dev\"],\n+ \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 6.0dev\"],\n+ \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 6.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n", "issue": "Expand pyarrow to support 5.x releases\nChangelog: https://raw.githubusercontent.com/apache/arrow/4591d76fce2846a29dac33bf01e9ba0337b118e9/CHANGELOG.md\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"grpcio >= 1.38.1, < 2.0dev\", # https://github.com/googleapis/python-bigquery/issues/695\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core[grpc] >= 1.29.0, <3.0.0dev\",\n \"proto-plus >= 1.10.0\",\n # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-cloud-core >= 1.4.1, <3.0.0dev\",\n \"google-resumable-media >= 0.6.0, < 3.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.38.1, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 5.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 5.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 5.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development 
Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"grpcio >= 1.38.1, < 2.0dev\", # https://github.com/googleapis/python-bigquery/issues/695\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core[grpc] >= 1.29.0, <3.0.0dev\",\n \"proto-plus >= 1.10.0\",\n # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-cloud-core >= 1.4.1, <3.0.0dev\",\n \"google-resumable-media >= 0.6.0, < 3.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.38.1, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 6.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 6.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 6.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]}
| 1,926 | 302 |
gh_patches_debug_6522 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1256 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive: Sub is required if a variable is used in a string in parameter descriptions
*cfn-lint version: 0.26.0*
*Description of issue.*
Parameter descriptions fail E1029 if they contain text which looks like variable substitution:
e.g.
```yaml
MyContentBucket:
Description: "Bucket name for content (usually ${VPCName}-my-content), use 'none' to disable creation"
Type: String
```
Gives an error:
[E1029: Sub is required if a variable is used in a string] (Found an embedded parameter outside of an "Fn::Sub" at Parameters/MyContentBucket/Description)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/SubNeeded.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8
9
10 class SubNeeded(CloudFormationLintRule):
11 """Check if a substitution string exists without a substitution function"""
12 id = 'E1029'
13 shortdesc = 'Sub is required if a variable is used in a string'
14 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
15 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
16 tags = ['functions', 'sub']
17
18 # Free-form text properties to exclude from this rule
19 # content is part of AWS::CloudFormation::Init
20 excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
21 'CloudWatchAlarmDefinition', 'TopicRulePayload']
22 api_excludes = ['Uri', 'Body']
23
24 # IAM Policy has special variables that don't require !Sub, Check for these
25 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
26 # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
27 # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
28 # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
29 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
30 resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
31 '${aws:TokenIssueTime}', '${aws:principaltype}',
32 '${aws:SecureTransport}', '${aws:SourceIp}',
33 '${aws:UserAgent}', '${aws:userid}',
34 '${aws:username}', '${ec2:SourceInstanceARN}',
35 '${iot:Connection.Thing.ThingName}',
36 '${iot:Connection.Thing.ThingTypeName}',
37 '${iot:Connection.Thing.IsAttached}',
38 '${iot:ClientId}', '${transfer:HomeBucket}',
39 '${transfer:HomeDirectory}', '${transfer:HomeFolder}',
40 '${transfer:UserName}', '${redshift:DbUser}',
41 '${cognito-identity.amazonaws.com:aud}',
42 '${cognito-identity.amazonaws.com:sub}',
43 '${cognito-identity.amazonaws.com:amr}']
44
45 # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
46 condition_excludes = [
47 '${redshift:DbUser}',
48 ]
49
50 def _match_values(self, searchRegex, cfnelem, path):
51 """Recursively search for values matching the searchRegex"""
52 values = []
53 if isinstance(cfnelem, dict):
54 for key in cfnelem:
55 pathprop = path[:]
56 pathprop.append(key)
57 values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))
58 elif isinstance(cfnelem, list):
59 for index, item in enumerate(cfnelem):
60 pathprop = path[:]
61 pathprop.append(index)
62 values.extend(self._match_values(searchRegex, item, pathprop))
63 else:
64 # Leaf node
65 if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):
66 # Get all variables as seperate paths
67 regex = re.compile(r'(\$\{.*?\.?.*?})')
68 for variable in re.findall(regex, cfnelem):
69 values.append(path + [variable])
70
71 return values
72
73 def match_values(self, searchRegex, cfn):
74 """
75 Search for values in all parts of the templates that match the searchRegex
76 """
77 results = []
78 results.extend(self._match_values(searchRegex, cfn.template, []))
79 # Globals are removed during a transform. They need to be checked manually
80 results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))
81 return results
82
83 def _api_exceptions(self, value):
84 """ Key value exceptions """
85 parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
86 return re.match(parameter_search, value)
87
88 def match(self, cfn):
89 """Basic Rule Matching"""
90
91 matches = []
92
93 # Generic regex to match a string containing at least one ${parameter}
94 parameter_search = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$')
95
96 # Get a list of paths to every leaf node string containing at least one ${parameter}
97 parameter_string_paths = self.match_values(parameter_search, cfn)
98
99 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
100 for parameter_string_path in parameter_string_paths:
101 # Exxclude the special IAM variables
102 variable = parameter_string_path[-1]
103
104 if 'Resource' in parameter_string_path:
105 if variable in self.resource_excludes:
106 continue
107 if 'Condition' in parameter_string_path:
108 if variable in self.condition_excludes:
109 continue
110
111 # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
112 if variable.startswith('${!'):
113 continue
114
115 found_sub = False
116 # Does the path contain an 'Fn::Sub'?
117 for step in parameter_string_path:
118 if step in self.api_excludes:
119 if self._api_exceptions(parameter_string_path[-1]):
120 found_sub = True
121 elif step == 'Fn::Sub' or step in self.excludes:
122 found_sub = True
123
124 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
125 if not found_sub:
126 # Remove the last item (the variable) to prevent multiple errors on 1 line errors
127 path = parameter_string_path[:-1]
128 message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format(
129 '/'.join(map(str, path)))
130 matches.append(RuleMatch(path, message))
131
132 return matches
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py
--- a/src/cfnlint/rules/functions/SubNeeded.py
+++ b/src/cfnlint/rules/functions/SubNeeded.py
@@ -98,6 +98,8 @@
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
for parameter_string_path in parameter_string_paths:
+ if parameter_string_path[0] in ['Parameters']:
+ continue
# Exxclude the special IAM variables
variable = parameter_string_path[-1]
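
To make the new guard concrete: the rule records a path for every `${...}` occurrence it finds, and anything rooted under the template's `Parameters` section is now skipped before the Fn::Sub check runs. Below is a small, self-contained illustration; the first path mirrors the issue's example, and the second path plus the surrounding data are made up for demonstration.

```python
# Illustrative only: mimics the guard added in the diff above.
parameter_string_paths = [
    ["Parameters", "MyContentBucket", "Description", "${VPCName}"],    # free-form text, skipped
    ["Resources", "MyTopic", "Properties", "TopicName", "${VPCName}"], # still checked
]

flagged = []
for path in parameter_string_paths:
    if path[0] in ["Parameters"]:
        continue  # parameter declarations are plain text, no Fn::Sub required
    flagged.append(path)

print(flagged)  # only the Resources path is reported
```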
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -98,6 +98,8 @@\n \n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n+ if parameter_string_path[0] in ['Parameters']:\n+ continue\n # Exxclude the special IAM variables\n variable = parameter_string_path[-1]\n", "issue": "False positive: Sub is required if a variable is used in a string in parameter descriptions\n*cfn-lint version: 0.26.0*\r\n\r\n*Description of issue.*\r\nParameter descriptions fail E1029 if they contain text which looks like variable substitution:\r\n\r\ne.g.\r\n\r\n```yaml\r\n MyContentBucket:\r\n Description: \"Bucket name for content (usually ${VPCName}-my-content), use 'none' to disable creation\"\r\n Type: String\r\n```\r\n\r\nGives an error:\r\n\r\n [E1029: Sub is required if a variable is used in a string] (Found an embedded parameter outside of an \"Fn::Sub\" at Parameters/MyContentBucket/Description)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n # content is part of AWS::CloudFormation::Init\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload']\n api_excludes = ['Uri', 'Body']\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def _match_values(self, searchRegex, cfnelem, 
path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n # Get all variables as seperate paths\n regex = re.compile(r'(\\$\\{.*?\\.?.*?})')\n for variable in re.findall(regex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, []))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^.*(\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n # Exxclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format(\n '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n # content is part of AWS::CloudFormation::Init\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload']\n api_excludes = ['Uri', 'Body']\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n # Get all variables as seperate paths\n regex = re.compile(r'(\\$\\{.*?\\.?.*?})')\n for variable in re.findall(regex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, []))\n # Globals are removed during a transform. 
They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^.*(\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n if parameter_string_path[0] in ['Parameters']:\n continue\n # Exxclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format(\n '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]}
| 2,040 | 129 |
gh_patches_debug_17133
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-7547
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add -e/--executable to poetry env info to get the python executable path
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] I have searched the [FAQ](https://python-poetry.org/docs/faq/) and general [documentation](https://python-poetry.org/docs/) and believe that my question is not already covered.
## Feature Request
In addition to the already present `-p/--path` option, add a `-e/--executable` option to return the Python executable path.
My use case: I'm starting to use Taskfile and Poetry on some projects; these projects are developed on both Linux and Windows.
I would like to avoid having to install tools such as mypy in the virtual environment, since they can be run from the outside (this also allows me to have faster CI; I have set up a custom Docker image with all the tools needed).
mypy in particular wants to know the exact path of the Python executable to work (passed via the `--python-executable` option), so having a new `poetry env info --executable` option that outputs the Python path would solve my issue in a cross-platform fashion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/poetry/console/commands/env/info.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from cleo.helpers import option
6
7 from poetry.console.commands.command import Command
8
9
10 if TYPE_CHECKING:
11 from poetry.utils.env import Env
12
13
14 class EnvInfoCommand(Command):
15 name = "env info"
16 description = "Displays information about the current environment."
17
18 options = [option("path", "p", "Only display the environment's path.")]
19
20 def handle(self) -> int:
21 from poetry.utils.env import EnvManager
22
23 env = EnvManager(self.poetry).get()
24
25 if self.option("path"):
26 if not env.is_venv():
27 return 1
28
29 self.line(str(env.path))
30
31 return 0
32
33 self._display_complete_info(env)
34 return 0
35
36 def _display_complete_info(self, env: Env) -> None:
37 env_python_version = ".".join(str(s) for s in env.version_info[:3])
38 self.line("")
39 self.line("<b>Virtualenv</b>")
40 listing = [
41 f"<info>Python</info>: <comment>{env_python_version}</>",
42 f"<info>Implementation</info>: <comment>{env.python_implementation}</>",
43 (
44 "<info>Path</info>: "
45 f" <comment>{env.path if env.is_venv() else 'NA'}</>"
46 ),
47 (
48 "<info>Executable</info>: "
49 f" <comment>{env.python if env.is_venv() else 'NA'}</>"
50 ),
51 ]
52 if env.is_venv():
53 listing.append(
54 "<info>Valid</info>: "
55 f" <{'comment' if env.is_sane() else 'error'}>{env.is_sane()}</>"
56 )
57 self.line("\n".join(listing))
58
59 self.line("")
60
61 system_env = env.parent_env
62 python = ".".join(str(v) for v in system_env.version_info[:3])
63 self.line("<b>System</b>")
64 self.line(
65 "\n".join(
66 [
67 f"<info>Platform</info>: <comment>{env.platform}</>",
68 f"<info>OS</info>: <comment>{env.os}</>",
69 f"<info>Python</info>: <comment>{python}</>",
70 f"<info>Path</info>: <comment>{system_env.path}</>",
71 f"<info>Executable</info>: <comment>{system_env.python}</>",
72 ]
73 )
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/poetry/console/commands/env/info.py b/src/poetry/console/commands/env/info.py
--- a/src/poetry/console/commands/env/info.py
+++ b/src/poetry/console/commands/env/info.py
@@ -15,7 +15,12 @@
name = "env info"
description = "Displays information about the current environment."
- options = [option("path", "p", "Only display the environment's path.")]
+ options = [
+ option("path", "p", "Only display the environment's path."),
+ option(
+ "executable", "e", "Only display the environment's python executable path."
+ ),
+ ]
def handle(self) -> int:
from poetry.utils.env import EnvManager
@@ -30,6 +35,14 @@
return 0
+ if self.option("executable"):
+ if not env.is_venv():
+ return 1
+
+ self.line(str(env.python))
+
+ return 0
+
self._display_complete_info(env)
return 0
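
For context, a minimal sketch of how the patched option could be consumed from a task runner or CI script. The command invocation, paths, and the mypy call are illustrative assumptions, not part of the patch itself; the only behaviour taken from the diff is that `poetry env info --executable` prints just the interpreter path (or exits with code 1 when no virtualenv exists).

```python
# Illustrative only: assumes a Poetry build with the patch is on PATH.
import subprocess

def python_executable() -> str:
    # Prints only the interpreter path, mirroring the existing -p/--path option.
    result = subprocess.run(
        ["poetry", "env", "info", "--executable"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.strip()

if __name__ == "__main__":
    exe = python_executable()
    # mypy can then be pointed at the project's interpreter without being
    # installed inside the virtualenv (the "src/" target is made up here).
    subprocess.run(["mypy", f"--python-executable={exe}", "src/"], check=False)
```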
|
{"golden_diff": "diff --git a/src/poetry/console/commands/env/info.py b/src/poetry/console/commands/env/info.py\n--- a/src/poetry/console/commands/env/info.py\n+++ b/src/poetry/console/commands/env/info.py\n@@ -15,7 +15,12 @@\n name = \"env info\"\n description = \"Displays information about the current environment.\"\n \n- options = [option(\"path\", \"p\", \"Only display the environment's path.\")]\n+ options = [\n+ option(\"path\", \"p\", \"Only display the environment's path.\"),\n+ option(\n+ \"executable\", \"e\", \"Only display the environment's python executable path.\"\n+ ),\n+ ]\n \n def handle(self) -> int:\n from poetry.utils.env import EnvManager\n@@ -30,6 +35,14 @@\n \n return 0\n \n+ if self.option(\"executable\"):\n+ if not env.is_venv():\n+ return 1\n+\n+ self.line(str(env.python))\n+\n+ return 0\n+\n self._display_complete_info(env)\n return 0\n", "issue": "add -e/--executable to poetry env info to get the python executable path\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have searched the [FAQ](https://python-poetry.org/docs/faq/) and general [documentation](https://python-poetry.org/docs/) and believe that my question is not already covered.\r\n\r\n## Feature Request\r\n\r\nin addition to the already present `-p/--path` option, add a `-e/--execuatble` option to return the python executable path.\r\n\r\nMy use case: I'm starting to use Taskfile and poetry on some projects; these project are developed on both linux and windows;\r\n\r\nI would like to avoid having to install tools such as mypy in the virtual environment, since they can be run from the outside (this also allows me to have faster CI, I have set up a custom docker image with all the tools needed).\r\n\r\nmypy in particular wants to know the exact path of the python executable to work (passed as `--python-executable` option), so having a new `poetry env info --executable` option that outputs the python path would solve my issue in a cross-platform fashion.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom cleo.helpers import option\n\nfrom poetry.console.commands.command import Command\n\n\nif TYPE_CHECKING:\n from poetry.utils.env import Env\n\n\nclass EnvInfoCommand(Command):\n name = \"env info\"\n description = \"Displays information about the current environment.\"\n\n options = [option(\"path\", \"p\", \"Only display the environment's path.\")]\n\n def handle(self) -> int:\n from poetry.utils.env import EnvManager\n\n env = EnvManager(self.poetry).get()\n\n if self.option(\"path\"):\n if not env.is_venv():\n return 1\n\n self.line(str(env.path))\n\n return 0\n\n self._display_complete_info(env)\n return 0\n\n def _display_complete_info(self, env: Env) -> None:\n env_python_version = \".\".join(str(s) for s in env.version_info[:3])\n self.line(\"\")\n self.line(\"<b>Virtualenv</b>\")\n listing = [\n f\"<info>Python</info>: <comment>{env_python_version}</>\",\n f\"<info>Implementation</info>: <comment>{env.python_implementation}</>\",\n (\n \"<info>Path</info>: \"\n f\" <comment>{env.path if env.is_venv() else 'NA'}</>\"\n ),\n (\n \"<info>Executable</info>: \"\n f\" <comment>{env.python if env.is_venv() else 'NA'}</>\"\n ),\n ]\n if env.is_venv():\n listing.append(\n \"<info>Valid</info>: \"\n f\" <{'comment' if env.is_sane() else 'error'}>{env.is_sane()}</>\"\n )\n self.line(\"\\n\".join(listing))\n\n self.line(\"\")\n\n system_env = 
env.parent_env\n python = \".\".join(str(v) for v in system_env.version_info[:3])\n self.line(\"<b>System</b>\")\n self.line(\n \"\\n\".join(\n [\n f\"<info>Platform</info>: <comment>{env.platform}</>\",\n f\"<info>OS</info>: <comment>{env.os}</>\",\n f\"<info>Python</info>: <comment>{python}</>\",\n f\"<info>Path</info>: <comment>{system_env.path}</>\",\n f\"<info>Executable</info>: <comment>{system_env.python}</>\",\n ]\n )\n )\n", "path": "src/poetry/console/commands/env/info.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom cleo.helpers import option\n\nfrom poetry.console.commands.command import Command\n\n\nif TYPE_CHECKING:\n from poetry.utils.env import Env\n\n\nclass EnvInfoCommand(Command):\n name = \"env info\"\n description = \"Displays information about the current environment.\"\n\n options = [\n option(\"path\", \"p\", \"Only display the environment's path.\"),\n option(\n \"executable\", \"e\", \"Only display the environment's python executable path.\"\n ),\n ]\n\n def handle(self) -> int:\n from poetry.utils.env import EnvManager\n\n env = EnvManager(self.poetry).get()\n\n if self.option(\"path\"):\n if not env.is_venv():\n return 1\n\n self.line(str(env.path))\n\n return 0\n\n if self.option(\"executable\"):\n if not env.is_venv():\n return 1\n\n self.line(str(env.python))\n\n return 0\n\n self._display_complete_info(env)\n return 0\n\n def _display_complete_info(self, env: Env) -> None:\n env_python_version = \".\".join(str(s) for s in env.version_info[:3])\n self.line(\"\")\n self.line(\"<b>Virtualenv</b>\")\n listing = [\n f\"<info>Python</info>: <comment>{env_python_version}</>\",\n f\"<info>Implementation</info>: <comment>{env.python_implementation}</>\",\n (\n \"<info>Path</info>: \"\n f\" <comment>{env.path if env.is_venv() else 'NA'}</>\"\n ),\n (\n \"<info>Executable</info>: \"\n f\" <comment>{env.python if env.is_venv() else 'NA'}</>\"\n ),\n ]\n if env.is_venv():\n listing.append(\n \"<info>Valid</info>: \"\n f\" <{'comment' if env.is_sane() else 'error'}>{env.is_sane()}</>\"\n )\n self.line(\"\\n\".join(listing))\n\n self.line(\"\")\n\n system_env = env.parent_env\n python = \".\".join(str(v) for v in system_env.version_info[:3])\n self.line(\"<b>System</b>\")\n self.line(\n \"\\n\".join(\n [\n f\"<info>Platform</info>: <comment>{env.platform}</>\",\n f\"<info>OS</info>: <comment>{env.os}</>\",\n f\"<info>Python</info>: <comment>{python}</>\",\n f\"<info>Path</info>: <comment>{system_env.path}</>\",\n f\"<info>Executable</info>: <comment>{system_env.python}</>\",\n ]\n )\n )\n", "path": "src/poetry/console/commands/env/info.py"}]}
| 1,210 | 245 |
gh_patches_debug_32591
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-2537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IGMPv3 igmpize broken
#### Brief description
The `igmpize` method is broken when used on IGMPv3 message types that don't appear in IGMP.
#### Environment
- Scapy version: `scapy version and/or commit-hash`
aefcfd322999637abeed183447d60112070e7a35 (master)
- Python version: `e.g. 3.5`
Python 3.6.10
- Operating System: `e.g. Minix 3.4`
Nixos Unstable (nixos-20.09pre215947.82b54d49066)
#### How to reproduce
```
from scapy.contrib.igmpv3 import *
a = Ether()
b = IP()
c = IGMPv3()/IGMPv3mr(records = [IGMPv3gr(maddr = "232.1.1.10", srcaddrs = "10.0.0.10")])
x = a/b/c
x[IGMPv3].igmpize()
```
1. construct a simple IGMPv3 packet (but not a membership query, which is shared with IGMP)
2. try to `.igmpize()`
#### Actual result
```
>>> x[IGMPv3].igmpize()
WARNING: Invalid IGMP Type detected !
False
```
#### Expected result
It would return `True`, and then I would be able to send the packet with sendp() and it would be valid.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/contrib/igmp.py`
Content:
```
1 # This file is part of Scapy
2 # Scapy is free software: you can redistribute it and/or modify
3 # it under the terms of the GNU General Public License as published by
4 # the Free Software Foundation, either version 2 of the License, or
5 # any later version.
6 #
7 # Scapy is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
11 #
12 # You should have received a copy of the GNU General Public License
13 # along with Scapy. If not, see <http://www.gnu.org/licenses/>.
14
15 # scapy.contrib.description = Internet Group Management Protocol v1/v2 (IGMP/IGMPv2)
16 # scapy.contrib.status = loads
17
18 from __future__ import print_function
19 from scapy.compat import chb, orb
20 from scapy.error import warning
21 from scapy.fields import ByteEnumField, ByteField, IPField, XShortField
22 from scapy.layers.inet import IP, IPOption_Router_Alert
23 from scapy.layers.l2 import Ether, getmacbyip
24 from scapy.packet import bind_layers, Packet
25 from scapy.utils import atol, checksum
26
27
28 def isValidMCAddr(ip):
29 """convert dotted quad string to long and check the first octet"""
30 FirstOct = atol(ip) >> 24 & 0xFF
31 return (FirstOct >= 224) and (FirstOct <= 239)
32
33
34 class IGMP(Packet):
35 """IGMP Message Class for v1 and v2.
36
37 This class is derived from class Packet. You need call "igmpize()"
38 so the packet is transformed according the RFC when sent.
39 a=Ether(src="00:01:02:03:04:05")
40 b=IP(src="1.2.3.4")
41 c=IGMP(type=0x12, gaddr="224.2.3.4")
42 x = a/b/c
43 x[IGMP].igmpize()
44 sendp(a/b/c, iface="en0")
45
46 Parameters:
47 type IGMP type field, 0x11, 0x12, 0x16 or 0x17
48 mrcode Maximum Response time (zero for v1)
49 gaddr Multicast Group Address 224.x.x.x/4
50
51 See RFC2236, Section 2. Introduction for definitions of proper
52 IGMPv2 message format http://www.faqs.org/rfcs/rfc2236.html
53 """
54 name = "IGMP"
55
56 igmptypes = {0x11: "Group Membership Query",
57 0x12: "Version 1 - Membership Report",
58 0x16: "Version 2 - Membership Report",
59 0x17: "Leave Group"}
60
61 fields_desc = [ByteEnumField("type", 0x11, igmptypes),
62 ByteField("mrcode", 20),
63 XShortField("chksum", None),
64 IPField("gaddr", "0.0.0.0")]
65
66 def post_build(self, p, pay):
67 """Called implicitly before a packet is sent to compute and place IGMP checksum.
68
69 Parameters:
70 self The instantiation of an IGMP class
71 p The IGMP message in hex in network byte order
72 pay Additional payload for the IGMP message
73 """
74 p += pay
75 if self.chksum is None:
76 ck = checksum(p)
77 p = p[:2] + chb(ck >> 8) + chb(ck & 0xff) + p[4:]
78 return p
79
80 @classmethod
81 def dispatch_hook(cls, _pkt=None, *args, **kargs):
82 if _pkt and len(_pkt) >= 4:
83 from scapy.contrib.igmpv3 import IGMPv3
84 if orb(_pkt[0]) in [0x22, 0x30, 0x31, 0x32]:
85 return IGMPv3
86 if orb(_pkt[0]) == 0x11 and len(_pkt) >= 12:
87 return IGMPv3
88 return IGMP
89
90 def igmpize(self):
91 """Called to explicitly fixup the packet according to the IGMP RFC
92
93 The rules are:
94 - General:
95 1. the Max Response time is meaningful only in Membership Queries and should be zero
96 - IP:
97 1. Send General Group Query to 224.0.0.1 (all systems)
98 2. Send Leave Group to 224.0.0.2 (all routers)
99 3a.Otherwise send the packet to the group address
100 3b.Send reports/joins to the group address
101 4. ttl = 1 (RFC 2236, section 2)
102 5. send the packet with the router alert IP option (RFC 2236, section 2)
103 - Ether:
104 1. Recalculate destination
105
106 Returns:
107 True The tuple ether/ip/self passed all check and represents
108 a proper IGMP packet.
109 False One of more validation checks failed and no fields
110 were adjusted.
111
112 The function will examine the IGMP message to assure proper format.
113 Corrections will be attempted if possible. The IP header is then properly
114 adjusted to ensure correct formatting and assignment. The Ethernet header
115 is then adjusted to the proper IGMP packet format.
116 """
117 gaddr = self.gaddr if hasattr(self, "gaddr") and self.gaddr else "0.0.0.0" # noqa: E501
118 underlayer = self.underlayer
119 if self.type not in [0x11, 0x30]: # General Rule 1 # noqa: E501
120 self.mrcode = 0
121 if isinstance(underlayer, IP):
122 if (self.type == 0x11):
123 if (gaddr == "0.0.0.0"):
124 underlayer.dst = "224.0.0.1" # IP rule 1 # noqa: E501
125 elif isValidMCAddr(gaddr):
126 underlayer.dst = gaddr # IP rule 3a # noqa: E501
127 else:
128 warning("Invalid IGMP Group Address detected !")
129 return False
130 elif ((self.type == 0x17) and isValidMCAddr(gaddr)):
131 underlayer.dst = "224.0.0.2" # IP rule 2 # noqa: E501
132 elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(gaddr)): # noqa: E501
133 underlayer.dst = gaddr # IP rule 3b # noqa: E501
134 else:
135 warning("Invalid IGMP Type detected !")
136 return False
137 if not any(isinstance(x, IPOption_Router_Alert) for x in underlayer.options): # noqa: E501
138 underlayer.options.append(IPOption_Router_Alert())
139 underlayer.ttl = 1 # IP rule 4
140 _root = self.firstlayer()
141 if _root.haslayer(Ether):
142 # Force recalculate Ether dst
143 _root[Ether].dst = getmacbyip(underlayer.dst) # Ether rule 1 # noqa: E501
144 from scapy.contrib.igmpv3 import IGMPv3
145 if isinstance(self, IGMPv3):
146 self.encode_maxrespcode()
147 return True
148
149 def mysummary(self):
150 """Display a summary of the IGMP object."""
151 if isinstance(self.underlayer, IP):
152 return self.underlayer.sprintf("IGMP: %IP.src% > %IP.dst% %IGMP.type% %IGMP.gaddr%") # noqa: E501
153 else:
154 return self.sprintf("IGMP %IGMP.type% %IGMP.gaddr%")
155
156
157 bind_layers(IP, IGMP, frag=0,
158 proto=2,
159 ttl=1)
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scapy/contrib/igmp.py b/scapy/contrib/igmp.py
--- a/scapy/contrib/igmp.py
+++ b/scapy/contrib/igmp.py
@@ -114,6 +114,7 @@
adjusted to ensure correct formatting and assignment. The Ethernet header
is then adjusted to the proper IGMP packet format.
"""
+ from scapy.contrib.igmpv3 import IGMPv3
gaddr = self.gaddr if hasattr(self, "gaddr") and self.gaddr else "0.0.0.0" # noqa: E501
underlayer = self.underlayer
if self.type not in [0x11, 0x30]: # General Rule 1 # noqa: E501
@@ -131,6 +132,8 @@
underlayer.dst = "224.0.0.2" # IP rule 2 # noqa: E501
elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(gaddr)): # noqa: E501
underlayer.dst = gaddr # IP rule 3b # noqa: E501
+ elif (self.type in [0x11, 0x22, 0x30, 0x31, 0x32] and isinstance(self, IGMPv3)):
+ pass
else:
warning("Invalid IGMP Type detected !")
return False
@@ -141,7 +144,6 @@
if _root.haslayer(Ether):
# Force recalculate Ether dst
_root[Ether].dst = getmacbyip(underlayer.dst) # Ether rule 1 # noqa: E501
- from scapy.contrib.igmpv3 import IGMPv3
if isinstance(self, IGMPv3):
self.encode_maxrespcode()
return True
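
For context, a minimal sketch of the behaviour the patch enables, following the reproduction steps from the issue. The destination address 224.0.0.22 is an illustrative assumption added so the Ethernet destination can be derived locally; it is not part of the patch.

```python
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP
from scapy.contrib.igmpv3 import IGMPv3, IGMPv3mr, IGMPv3gr

# Build a v3 membership report; bind_layers sets type 0x22 on the IGMPv3 layer.
pkt = Ether() / IP(dst="224.0.0.22") / IGMPv3() / IGMPv3mr(
    records=[IGMPv3gr(maddr="232.1.1.10", srcaddrs=["10.0.0.10"])]
)

# Before the patch this warned "Invalid IGMP Type detected !" and returned False;
# with the patch the v3-only types are accepted, the Router Alert option and
# ttl=1 are applied to the IP layer, and the Ethernet destination is recalculated.
assert pkt[IGMPv3].igmpize() is True
```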
|
{"golden_diff": "diff --git a/scapy/contrib/igmp.py b/scapy/contrib/igmp.py\n--- a/scapy/contrib/igmp.py\n+++ b/scapy/contrib/igmp.py\n@@ -114,6 +114,7 @@\n adjusted to ensure correct formatting and assignment. The Ethernet header\n is then adjusted to the proper IGMP packet format.\n \"\"\"\n+ from scapy.contrib.igmpv3 import IGMPv3\n gaddr = self.gaddr if hasattr(self, \"gaddr\") and self.gaddr else \"0.0.0.0\" # noqa: E501\n underlayer = self.underlayer\n if self.type not in [0x11, 0x30]: # General Rule 1 # noqa: E501\n@@ -131,6 +132,8 @@\n underlayer.dst = \"224.0.0.2\" # IP rule 2 # noqa: E501\n elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(gaddr)): # noqa: E501\n underlayer.dst = gaddr # IP rule 3b # noqa: E501\n+ elif (self.type in [0x11, 0x22, 0x30, 0x31, 0x32] and isinstance(self, IGMPv3)):\n+ pass\n else:\n warning(\"Invalid IGMP Type detected !\")\n return False\n@@ -141,7 +144,6 @@\n if _root.haslayer(Ether):\n # Force recalculate Ether dst\n _root[Ether].dst = getmacbyip(underlayer.dst) # Ether rule 1 # noqa: E501\n- from scapy.contrib.igmpv3 import IGMPv3\n if isinstance(self, IGMPv3):\n self.encode_maxrespcode()\n return True\n", "issue": "IGMPv3 igmpize broken\n#### Brief description\r\n\r\nthe `igmpize` method is broken when used on IGMPv3 message types that don't appear in IGMP.\r\n\r\n#### Environment\r\n\r\n- Scapy version: `scapy version and/or commit-hash`\r\n aefcfd322999637abeed183447d60112070e7a35 (master)\r\n- Python version: `e.g. 3.5`\r\n Python 3.6.10\r\n- Operating System: `e.g. Minix 3.4`\r\n Nixos Unstable (nixos-20.09pre215947.82b54d49066)\r\n\r\n#### How to reproduce\r\n\r\n```\r\nfrom scapy.contrib.igmpv3 import *\r\n\r\na = Ether()\r\nb = IP()\r\nc = IGMPv3()/IGMPv3mr(records = [IGMPv3gr(maddr = \"232.1.1.10\", srcaddrs = \"10.0.0.10\")])\r\nx = a/b/c\r\nx[IGMPv3].igmpize()\r\n```\r\n\r\n1. construct a simple IGMPv3 packet (but not a membership query, which is shared with IGMP)\r\n2. try to `.igmpize()`\r\n\r\n#### Actual result\r\n\r\n```\r\n>>> x[IGMPv3].igmpize()\r\nWARNING: Invalid IGMP Type detected !\r\nFalse\r\n```\r\n\r\n#### Expected result\r\n\r\nIt would return `True`, and then I would be able to send the packet with sendp() and it would be valid.\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# Scapy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# any later version.\n#\n# Scapy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Scapy. 
If not, see <http://www.gnu.org/licenses/>.\n\n# scapy.contrib.description = Internet Group Management Protocol v1/v2 (IGMP/IGMPv2)\n# scapy.contrib.status = loads\n\nfrom __future__ import print_function\nfrom scapy.compat import chb, orb\nfrom scapy.error import warning\nfrom scapy.fields import ByteEnumField, ByteField, IPField, XShortField\nfrom scapy.layers.inet import IP, IPOption_Router_Alert\nfrom scapy.layers.l2 import Ether, getmacbyip\nfrom scapy.packet import bind_layers, Packet\nfrom scapy.utils import atol, checksum\n\n\ndef isValidMCAddr(ip):\n \"\"\"convert dotted quad string to long and check the first octet\"\"\"\n FirstOct = atol(ip) >> 24 & 0xFF\n return (FirstOct >= 224) and (FirstOct <= 239)\n\n\nclass IGMP(Packet):\n \"\"\"IGMP Message Class for v1 and v2.\n\n This class is derived from class Packet. You need call \"igmpize()\"\n so the packet is transformed according the RFC when sent.\n a=Ether(src=\"00:01:02:03:04:05\")\n b=IP(src=\"1.2.3.4\")\n c=IGMP(type=0x12, gaddr=\"224.2.3.4\")\n x = a/b/c\n x[IGMP].igmpize()\n sendp(a/b/c, iface=\"en0\")\n\n Parameters:\n type IGMP type field, 0x11, 0x12, 0x16 or 0x17\n mrcode Maximum Response time (zero for v1)\n gaddr Multicast Group Address 224.x.x.x/4\n\n See RFC2236, Section 2. Introduction for definitions of proper\n IGMPv2 message format http://www.faqs.org/rfcs/rfc2236.html\n \"\"\"\n name = \"IGMP\"\n\n igmptypes = {0x11: \"Group Membership Query\",\n 0x12: \"Version 1 - Membership Report\",\n 0x16: \"Version 2 - Membership Report\",\n 0x17: \"Leave Group\"}\n\n fields_desc = [ByteEnumField(\"type\", 0x11, igmptypes),\n ByteField(\"mrcode\", 20),\n XShortField(\"chksum\", None),\n IPField(\"gaddr\", \"0.0.0.0\")]\n\n def post_build(self, p, pay):\n \"\"\"Called implicitly before a packet is sent to compute and place IGMP checksum.\n\n Parameters:\n self The instantiation of an IGMP class\n p The IGMP message in hex in network byte order\n pay Additional payload for the IGMP message\n \"\"\"\n p += pay\n if self.chksum is None:\n ck = checksum(p)\n p = p[:2] + chb(ck >> 8) + chb(ck & 0xff) + p[4:]\n return p\n\n @classmethod\n def dispatch_hook(cls, _pkt=None, *args, **kargs):\n if _pkt and len(_pkt) >= 4:\n from scapy.contrib.igmpv3 import IGMPv3\n if orb(_pkt[0]) in [0x22, 0x30, 0x31, 0x32]:\n return IGMPv3\n if orb(_pkt[0]) == 0x11 and len(_pkt) >= 12:\n return IGMPv3\n return IGMP\n\n def igmpize(self):\n \"\"\"Called to explicitly fixup the packet according to the IGMP RFC\n\n The rules are:\n - General:\n 1. the Max Response time is meaningful only in Membership Queries and should be zero\n - IP:\n 1. Send General Group Query to 224.0.0.1 (all systems)\n 2. Send Leave Group to 224.0.0.2 (all routers)\n 3a.Otherwise send the packet to the group address\n 3b.Send reports/joins to the group address\n 4. ttl = 1 (RFC 2236, section 2)\n 5. send the packet with the router alert IP option (RFC 2236, section 2)\n - Ether:\n 1. Recalculate destination\n\n Returns:\n True The tuple ether/ip/self passed all check and represents\n a proper IGMP packet.\n False One of more validation checks failed and no fields\n were adjusted.\n\n The function will examine the IGMP message to assure proper format.\n Corrections will be attempted if possible. The IP header is then properly\n adjusted to ensure correct formatting and assignment. 
The Ethernet header\n is then adjusted to the proper IGMP packet format.\n \"\"\"\n gaddr = self.gaddr if hasattr(self, \"gaddr\") and self.gaddr else \"0.0.0.0\" # noqa: E501\n underlayer = self.underlayer\n if self.type not in [0x11, 0x30]: # General Rule 1 # noqa: E501\n self.mrcode = 0\n if isinstance(underlayer, IP):\n if (self.type == 0x11):\n if (gaddr == \"0.0.0.0\"):\n underlayer.dst = \"224.0.0.1\" # IP rule 1 # noqa: E501\n elif isValidMCAddr(gaddr):\n underlayer.dst = gaddr # IP rule 3a # noqa: E501\n else:\n warning(\"Invalid IGMP Group Address detected !\")\n return False\n elif ((self.type == 0x17) and isValidMCAddr(gaddr)):\n underlayer.dst = \"224.0.0.2\" # IP rule 2 # noqa: E501\n elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(gaddr)): # noqa: E501\n underlayer.dst = gaddr # IP rule 3b # noqa: E501\n else:\n warning(\"Invalid IGMP Type detected !\")\n return False\n if not any(isinstance(x, IPOption_Router_Alert) for x in underlayer.options): # noqa: E501\n underlayer.options.append(IPOption_Router_Alert())\n underlayer.ttl = 1 # IP rule 4\n _root = self.firstlayer()\n if _root.haslayer(Ether):\n # Force recalculate Ether dst\n _root[Ether].dst = getmacbyip(underlayer.dst) # Ether rule 1 # noqa: E501\n from scapy.contrib.igmpv3 import IGMPv3\n if isinstance(self, IGMPv3):\n self.encode_maxrespcode()\n return True\n\n def mysummary(self):\n \"\"\"Display a summary of the IGMP object.\"\"\"\n if isinstance(self.underlayer, IP):\n return self.underlayer.sprintf(\"IGMP: %IP.src% > %IP.dst% %IGMP.type% %IGMP.gaddr%\") # noqa: E501\n else:\n return self.sprintf(\"IGMP %IGMP.type% %IGMP.gaddr%\")\n\n\nbind_layers(IP, IGMP, frag=0,\n proto=2,\n ttl=1)\n", "path": "scapy/contrib/igmp.py"}], "after_files": [{"content": "# This file is part of Scapy\n# Scapy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# any later version.\n#\n# Scapy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Scapy. If not, see <http://www.gnu.org/licenses/>.\n\n# scapy.contrib.description = Internet Group Management Protocol v1/v2 (IGMP/IGMPv2)\n# scapy.contrib.status = loads\n\nfrom __future__ import print_function\nfrom scapy.compat import chb, orb\nfrom scapy.error import warning\nfrom scapy.fields import ByteEnumField, ByteField, IPField, XShortField\nfrom scapy.layers.inet import IP, IPOption_Router_Alert\nfrom scapy.layers.l2 import Ether, getmacbyip\nfrom scapy.packet import bind_layers, Packet\nfrom scapy.utils import atol, checksum\n\n\ndef isValidMCAddr(ip):\n \"\"\"convert dotted quad string to long and check the first octet\"\"\"\n FirstOct = atol(ip) >> 24 & 0xFF\n return (FirstOct >= 224) and (FirstOct <= 239)\n\n\nclass IGMP(Packet):\n \"\"\"IGMP Message Class for v1 and v2.\n\n This class is derived from class Packet. 
You need call \"igmpize()\"\n so the packet is transformed according the RFC when sent.\n a=Ether(src=\"00:01:02:03:04:05\")\n b=IP(src=\"1.2.3.4\")\n c=IGMP(type=0x12, gaddr=\"224.2.3.4\")\n x = a/b/c\n x[IGMP].igmpize()\n sendp(a/b/c, iface=\"en0\")\n\n Parameters:\n type IGMP type field, 0x11, 0x12, 0x16 or 0x17\n mrcode Maximum Response time (zero for v1)\n gaddr Multicast Group Address 224.x.x.x/4\n\n See RFC2236, Section 2. Introduction for definitions of proper\n IGMPv2 message format http://www.faqs.org/rfcs/rfc2236.html\n \"\"\"\n name = \"IGMP\"\n\n igmptypes = {0x11: \"Group Membership Query\",\n 0x12: \"Version 1 - Membership Report\",\n 0x16: \"Version 2 - Membership Report\",\n 0x17: \"Leave Group\"}\n\n fields_desc = [ByteEnumField(\"type\", 0x11, igmptypes),\n ByteField(\"mrcode\", 20),\n XShortField(\"chksum\", None),\n IPField(\"gaddr\", \"0.0.0.0\")]\n\n def post_build(self, p, pay):\n \"\"\"Called implicitly before a packet is sent to compute and place IGMP checksum.\n\n Parameters:\n self The instantiation of an IGMP class\n p The IGMP message in hex in network byte order\n pay Additional payload for the IGMP message\n \"\"\"\n p += pay\n if self.chksum is None:\n ck = checksum(p)\n p = p[:2] + chb(ck >> 8) + chb(ck & 0xff) + p[4:]\n return p\n\n @classmethod\n def dispatch_hook(cls, _pkt=None, *args, **kargs):\n if _pkt and len(_pkt) >= 4:\n from scapy.contrib.igmpv3 import IGMPv3\n if orb(_pkt[0]) in [0x22, 0x30, 0x31, 0x32]:\n return IGMPv3\n if orb(_pkt[0]) == 0x11 and len(_pkt) >= 12:\n return IGMPv3\n return IGMP\n\n def igmpize(self):\n \"\"\"Called to explicitly fixup the packet according to the IGMP RFC\n\n The rules are:\n - General:\n 1. the Max Response time is meaningful only in Membership Queries and should be zero\n - IP:\n 1. Send General Group Query to 224.0.0.1 (all systems)\n 2. Send Leave Group to 224.0.0.2 (all routers)\n 3a.Otherwise send the packet to the group address\n 3b.Send reports/joins to the group address\n 4. ttl = 1 (RFC 2236, section 2)\n 5. send the packet with the router alert IP option (RFC 2236, section 2)\n - Ether:\n 1. Recalculate destination\n\n Returns:\n True The tuple ether/ip/self passed all check and represents\n a proper IGMP packet.\n False One of more validation checks failed and no fields\n were adjusted.\n\n The function will examine the IGMP message to assure proper format.\n Corrections will be attempted if possible. The IP header is then properly\n adjusted to ensure correct formatting and assignment. 
The Ethernet header\n is then adjusted to the proper IGMP packet format.\n \"\"\"\n from scapy.contrib.igmpv3 import IGMPv3\n gaddr = self.gaddr if hasattr(self, \"gaddr\") and self.gaddr else \"0.0.0.0\" # noqa: E501\n underlayer = self.underlayer\n if self.type not in [0x11, 0x30]: # General Rule 1 # noqa: E501\n self.mrcode = 0\n if isinstance(underlayer, IP):\n if (self.type == 0x11):\n if (gaddr == \"0.0.0.0\"):\n underlayer.dst = \"224.0.0.1\" # IP rule 1 # noqa: E501\n elif isValidMCAddr(gaddr):\n underlayer.dst = gaddr # IP rule 3a # noqa: E501\n else:\n warning(\"Invalid IGMP Group Address detected !\")\n return False\n elif ((self.type == 0x17) and isValidMCAddr(gaddr)):\n underlayer.dst = \"224.0.0.2\" # IP rule 2 # noqa: E501\n elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(gaddr)): # noqa: E501\n underlayer.dst = gaddr # IP rule 3b # noqa: E501\n elif (self.type in [0x11, 0x22, 0x30, 0x31, 0x32] and isinstance(self, IGMPv3)):\n pass\n else:\n warning(\"Invalid IGMP Type detected !\")\n return False\n if not any(isinstance(x, IPOption_Router_Alert) for x in underlayer.options): # noqa: E501\n underlayer.options.append(IPOption_Router_Alert())\n underlayer.ttl = 1 # IP rule 4\n _root = self.firstlayer()\n if _root.haslayer(Ether):\n # Force recalculate Ether dst\n _root[Ether].dst = getmacbyip(underlayer.dst) # Ether rule 1 # noqa: E501\n if isinstance(self, IGMPv3):\n self.encode_maxrespcode()\n return True\n\n def mysummary(self):\n \"\"\"Display a summary of the IGMP object.\"\"\"\n if isinstance(self.underlayer, IP):\n return self.underlayer.sprintf(\"IGMP: %IP.src% > %IP.dst% %IGMP.type% %IGMP.gaddr%\") # noqa: E501\n else:\n return self.sprintf(\"IGMP %IGMP.type% %IGMP.gaddr%\")\n\n\nbind_layers(IP, IGMP, frag=0,\n proto=2,\n ttl=1)\n", "path": "scapy/contrib/igmp.py"}]}
| 2,848 | 458 |
gh_patches_debug_35756
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-4787
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mcry mode='basic' does not use ancillas
### Information
- **Qiskit Terra version**: 0.14.2
- **Python version**: 3
- **Operating system**: Ubuntu
### What is the current behavior?
The `mcry` gate (implemented in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py) has a 'basic' mode that expects ancillas but calls `mct` in its default 'noancilla' mode, so both `mcry(mode='noancilla')` and `mcry(mode='basic')` use no ancillas and a lot of gates.
### Steps to reproduce the problem
### What is the expected behavior?
`mcry` in `mode='basic'` should call `mct` with `mode='basic'`.
### Suggested solutions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2018, 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12 """
13 Multiple-Controlled U3 gate. Not using ancillary qubits.
14 """
15
16 import logging
17 from math import pi
18 from qiskit.circuit import QuantumCircuit, QuantumRegister, Qubit
19 from qiskit.circuit.library.standard_gates.u3 import _generate_gray_code
20 from qiskit.exceptions import QiskitError
21
22 logger = logging.getLogger(__name__)
23
24
25 def _apply_cu3(circuit, theta, phi, lam, control, target, use_basis_gates=True):
26 if use_basis_gates:
27 circuit.u1((lam + phi) / 2, control)
28 circuit.u1((lam - phi) / 2, target)
29 circuit.cx(control, target)
30 circuit.u3(-theta / 2, 0, -(phi + lam) / 2, target)
31 circuit.cx(control, target)
32 circuit.u3(theta / 2, phi, 0, target)
33 else:
34 circuit.cu3(theta, phi, lam, control, target)
35
36
37 def _apply_mcu3_graycode(circuit, theta, phi, lam, ctls, tgt, use_basis_gates):
38 """Apply multi-controlled u3 gate from ctls to tgt using graycode
39 pattern with single-step angles theta, phi, lam."""
40
41 n = len(ctls)
42
43 gray_code = _generate_gray_code(n)
44 last_pattern = None
45
46 for pattern in gray_code:
47 if '1' not in pattern:
48 continue
49 if last_pattern is None:
50 last_pattern = pattern
51 # find left most set bit
52 lm_pos = list(pattern).index('1')
53
54 # find changed bit
55 comp = [i != j for i, j in zip(pattern, last_pattern)]
56 if True in comp:
57 pos = comp.index(True)
58 else:
59 pos = None
60 if pos is not None:
61 if pos != lm_pos:
62 circuit.cx(ctls[pos], ctls[lm_pos])
63 else:
64 indices = [i for i, x in enumerate(pattern) if x == '1']
65 for idx in indices[1:]:
66 circuit.cx(ctls[idx], ctls[lm_pos])
67 # check parity and undo rotation
68 if pattern.count('1') % 2 == 0:
69 # inverse CU3: u3(theta, phi, lamb)^dagger = u3(-theta, -lam, -phi)
70 _apply_cu3(circuit, -theta, -lam, -phi, ctls[lm_pos], tgt,
71 use_basis_gates=use_basis_gates)
72 else:
73 _apply_cu3(circuit, theta, phi, lam, ctls[lm_pos], tgt,
74 use_basis_gates=use_basis_gates)
75 last_pattern = pattern
76
77
78 def mcrx(self, theta, q_controls, q_target, use_basis_gates=False):
79 """
80 Apply Multiple-Controlled X rotation gate
81
82 Args:
83 self (QuantumCircuit): The QuantumCircuit object to apply the mcrx gate on.
84 theta (float): angle theta
85 q_controls (list(Qubit)): The list of control qubits
86 q_target (Qubit): The target qubit
87 use_basis_gates (bool): use u1, u2, u3, cx, id
88
89 Raises:
90 QiskitError: parameter errors
91 """
92
93 # check controls
94 if isinstance(q_controls, QuantumRegister):
95 control_qubits = list(q_controls)
96 elif isinstance(q_controls, list):
97 control_qubits = q_controls
98 else:
99 raise QiskitError(
100 'The mcrx gate needs a list of qubits or a quantum register for controls.')
101
102 # check target
103 if isinstance(q_target, Qubit):
104 target_qubit = q_target
105 else:
106 raise QiskitError('The mcrx gate needs a single qubit as target.')
107
108 all_qubits = control_qubits + [target_qubit]
109
110 self._check_qargs(all_qubits)
111 self._check_dups(all_qubits)
112
113 n_c = len(control_qubits)
114 if n_c == 1: # cu3
115 _apply_cu3(self, theta, -pi/2, pi/2, control_qubits[0],
116 target_qubit, use_basis_gates=use_basis_gates)
117 else:
118 theta_step = theta * (1 / (2 ** (n_c - 1)))
119 _apply_mcu3_graycode(self, theta_step, -pi/2, pi/2, control_qubits,
120 target_qubit, use_basis_gates=use_basis_gates)
121
122
123 def mcry(self, theta, q_controls, q_target, q_ancillae, mode='basic',
124 use_basis_gates=False):
125 """
126 Apply Multiple-Controlled Y rotation gate
127
128 Args:
129 self (QuantumCircuit): The QuantumCircuit object to apply the mcry gate on.
130 theta (float): angle theta
131 q_controls (list(Qubit)): The list of control qubits
132 q_target (Qubit): The target qubit
133 q_ancillae (QuantumRegister or tuple(QuantumRegister, int)): The list of ancillary qubits.
134 mode (string): The implementation mode to use
135 use_basis_gates (bool): use u1, u2, u3, cx, id
136
137 Raises:
138 QiskitError: parameter errors
139 """
140
141 # check controls
142 if isinstance(q_controls, QuantumRegister):
143 control_qubits = list(q_controls)
144 elif isinstance(q_controls, list):
145 control_qubits = q_controls
146 else:
147 raise QiskitError('The mcry gate needs a list of qubits or a quantum '
148 'register for controls.')
149
150 # check target
151 if isinstance(q_target, Qubit):
152 target_qubit = q_target
153 else:
154 raise QiskitError('The mcry gate needs a single qubit as target.')
155
156 # check ancilla
157 if q_ancillae is None:
158 ancillary_qubits = []
159 elif isinstance(q_ancillae, QuantumRegister):
160 ancillary_qubits = list(q_ancillae)
161 elif isinstance(q_ancillae, list):
162 ancillary_qubits = q_ancillae
163 else:
164 raise QiskitError('The mcry gate needs None or a list of qubits or a '
165 'quantum register for ancilla.')
166
167 all_qubits = control_qubits + [target_qubit] + ancillary_qubits
168
169 self._check_qargs(all_qubits)
170 self._check_dups(all_qubits)
171
172 if mode == 'basic':
173 self.u3(theta / 2, 0, 0, q_target)
174 self.mct(q_controls, q_target, q_ancillae)
175 self.u3(-theta / 2, 0, 0, q_target)
176 self.mct(q_controls, q_target, q_ancillae)
177 elif mode == 'noancilla':
178 n_c = len(control_qubits)
179 if n_c == 1: # cu3
180 _apply_cu3(self, theta, 0, 0, control_qubits[0],
181 target_qubit, use_basis_gates=use_basis_gates)
182 else:
183 theta_step = theta * (1 / (2 ** (n_c - 1)))
184 _apply_mcu3_graycode(self, theta_step, 0, 0, control_qubits,
185 target_qubit, use_basis_gates=use_basis_gates)
186 else:
187 raise QiskitError('Unrecognized mode for building MCRY circuit: {}.'.format(mode))
188
189
190 def mcrz(self, lam, q_controls, q_target, use_basis_gates=False):
191 """
192 Apply Multiple-Controlled Z rotation gate
193
194 Args:
195 self (QuantumCircuit): The QuantumCircuit object to apply the mcrz gate on.
196 lam (float): angle lambda
197 q_controls (list(Qubit)): The list of control qubits
198 q_target (Qubit): The target qubit
199 use_basis_gates (bool): use u1, u2, u3, cx, id
200
201 Raises:
202 QiskitError: parameter errors
203 """
204
205 # check controls
206 if isinstance(q_controls, QuantumRegister):
207 control_qubits = list(q_controls)
208 elif isinstance(q_controls, list):
209 control_qubits = q_controls
210 else:
211 raise QiskitError(
212 'The mcrz gate needs a list of qubits or a quantum register for controls.')
213
214 # check target
215 if isinstance(q_target, Qubit):
216 target_qubit = q_target
217 else:
218 raise QiskitError('The mcrz gate needs a single qubit as target.')
219
220 all_qubits = control_qubits + [target_qubit]
221
222 self._check_qargs(all_qubits)
223 self._check_dups(all_qubits)
224
225 n_c = len(control_qubits)
226 if n_c == 1: # cu3
227 _apply_cu3(self, 0, 0, lam, control_qubits[0],
228 target_qubit, use_basis_gates=use_basis_gates)
229 else:
230 lam_step = lam * (1 / (2 ** (n_c - 1)))
231 _apply_mcu3_graycode(self, 0, 0, lam_step, control_qubits,
232 target_qubit, use_basis_gates=use_basis_gates)
233
234
235 QuantumCircuit.mcrx = mcrx
236 QuantumCircuit.mcry = mcry
237 QuantumCircuit.mcrz = mcrz
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py b/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py
--- a/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py
+++ b/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py
@@ -16,6 +16,7 @@
import logging
from math import pi
from qiskit.circuit import QuantumCircuit, QuantumRegister, Qubit
+from qiskit.circuit.library.standard_gates.x import MCXGate
from qiskit.circuit.library.standard_gates.u3 import _generate_gray_code
from qiskit.exceptions import QiskitError
@@ -31,7 +32,7 @@
circuit.cx(control, target)
circuit.u3(theta / 2, phi, 0, target)
else:
- circuit.cu3(theta, phi, lam, control, target)
+ circuit.cu(theta, phi, lam, 0, control, target)
def _apply_mcu3_graycode(circuit, theta, phi, lam, ctls, tgt, use_basis_gates):
@@ -120,7 +121,7 @@
target_qubit, use_basis_gates=use_basis_gates)
-def mcry(self, theta, q_controls, q_target, q_ancillae, mode='basic',
+def mcry(self, theta, q_controls, q_target, q_ancillae, mode=None,
use_basis_gates=False):
"""
Apply Multiple-Controlled Y rotation gate
@@ -169,11 +170,20 @@
self._check_qargs(all_qubits)
self._check_dups(all_qubits)
+ # auto-select the best mode
+ if mode is None:
+ # if enough ancillary qubits are provided, use the 'v-chain' method
+ additional_vchain = MCXGate.get_num_ancilla_qubits(len(control_qubits), 'v-chain')
+ if len(ancillary_qubits) >= additional_vchain:
+ mode = 'basic'
+ else:
+ mode = 'noancilla'
+
if mode == 'basic':
- self.u3(theta / 2, 0, 0, q_target)
- self.mct(q_controls, q_target, q_ancillae)
- self.u3(-theta / 2, 0, 0, q_target)
- self.mct(q_controls, q_target, q_ancillae)
+ self.ry(theta / 2, q_target)
+ self.mcx(q_controls, q_target, q_ancillae, mode='v-chain')
+ self.ry(-theta / 2, q_target)
+ self.mcx(q_controls, q_target, q_ancillae, mode='v-chain')
elif mode == 'noancilla':
n_c = len(control_qubits)
if n_c == 1: # cu3
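
For context, a minimal sketch of how the patched `mcry` auto-selects the ancilla-based construction. The register sizes and the rotation angle are illustrative assumptions; the only behaviour taken from the diff is that, with `mode` left unset and enough ancillas supplied, the 'basic' (v-chain) decomposition is used instead of the gray-code one.

```python
from math import pi
from qiskit import QuantumCircuit, QuantumRegister

controls = QuantumRegister(4, "c")
target = QuantumRegister(1, "t")
ancillas = QuantumRegister(2, "a")  # 4 controls need 2 ancillas for the 'v-chain' MCX
qc = QuantumCircuit(controls, target, ancillas)

# With enough ancillas provided, the patched mcry decomposes into
# ry / mcx(mode='v-chain') / ry / mcx(mode='v-chain'), so the ancillas are
# actually used and the gate count stays low compared to 'noancilla'.
qc.mcry(pi / 4, controls[:], target[0], ancillas[:])
print(qc.count_ops())
```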
|
{"golden_diff": "diff --git a/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py b/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py\n--- a/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py\n+++ b/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py\n@@ -16,6 +16,7 @@\n import logging\n from math import pi\n from qiskit.circuit import QuantumCircuit, QuantumRegister, Qubit\n+from qiskit.circuit.library.standard_gates.x import MCXGate\n from qiskit.circuit.library.standard_gates.u3 import _generate_gray_code\n from qiskit.exceptions import QiskitError\n \n@@ -31,7 +32,7 @@\n circuit.cx(control, target)\n circuit.u3(theta / 2, phi, 0, target)\n else:\n- circuit.cu3(theta, phi, lam, control, target)\n+ circuit.cu(theta, phi, lam, 0, control, target)\n \n \n def _apply_mcu3_graycode(circuit, theta, phi, lam, ctls, tgt, use_basis_gates):\n@@ -120,7 +121,7 @@\n target_qubit, use_basis_gates=use_basis_gates)\n \n \n-def mcry(self, theta, q_controls, q_target, q_ancillae, mode='basic',\n+def mcry(self, theta, q_controls, q_target, q_ancillae, mode=None,\n use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled Y rotation gate\n@@ -169,11 +170,20 @@\n self._check_qargs(all_qubits)\n self._check_dups(all_qubits)\n \n+ # auto-select the best mode\n+ if mode is None:\n+ # if enough ancillary qubits are provided, use the 'v-chain' method\n+ additional_vchain = MCXGate.get_num_ancilla_qubits(len(control_qubits), 'v-chain')\n+ if len(ancillary_qubits) >= additional_vchain:\n+ mode = 'basic'\n+ else:\n+ mode = 'noancilla'\n+\n if mode == 'basic':\n- self.u3(theta / 2, 0, 0, q_target)\n- self.mct(q_controls, q_target, q_ancillae)\n- self.u3(-theta / 2, 0, 0, q_target)\n- self.mct(q_controls, q_target, q_ancillae)\n+ self.ry(theta / 2, q_target)\n+ self.mcx(q_controls, q_target, q_ancillae, mode='v-chain')\n+ self.ry(-theta / 2, q_target)\n+ self.mcx(q_controls, q_target, q_ancillae, mode='v-chain')\n elif mode == 'noancilla':\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n", "issue": "mcry mode='basic' does not use ancillas\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **0.14.2**:\r\n- **3**:\r\n- **Ubuntu**:\r\n\r\n### What is the current behavior?\r\nThe mcry gate (implemented in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py) has a 'basic' mode that expects ancillas, but calls mct in its default 'noancilla' mode. (so both mcry(mode = 'noancilla') and mcry(mode='basic') use no ancillas and a lot of gates).\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n\r\n\r\n### What is the expected behavior?\r\nmcry in mode='basic' should call mct with mode='basic'\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nMultiple-Controlled U3 gate. 
Not using ancillary qubits.\n\"\"\"\n\nimport logging\nfrom math import pi\nfrom qiskit.circuit import QuantumCircuit, QuantumRegister, Qubit\nfrom qiskit.circuit.library.standard_gates.u3 import _generate_gray_code\nfrom qiskit.exceptions import QiskitError\n\nlogger = logging.getLogger(__name__)\n\n\ndef _apply_cu3(circuit, theta, phi, lam, control, target, use_basis_gates=True):\n if use_basis_gates:\n circuit.u1((lam + phi) / 2, control)\n circuit.u1((lam - phi) / 2, target)\n circuit.cx(control, target)\n circuit.u3(-theta / 2, 0, -(phi + lam) / 2, target)\n circuit.cx(control, target)\n circuit.u3(theta / 2, phi, 0, target)\n else:\n circuit.cu3(theta, phi, lam, control, target)\n\n\ndef _apply_mcu3_graycode(circuit, theta, phi, lam, ctls, tgt, use_basis_gates):\n \"\"\"Apply multi-controlled u3 gate from ctls to tgt using graycode\n pattern with single-step angles theta, phi, lam.\"\"\"\n\n n = len(ctls)\n\n gray_code = _generate_gray_code(n)\n last_pattern = None\n\n for pattern in gray_code:\n if '1' not in pattern:\n continue\n if last_pattern is None:\n last_pattern = pattern\n # find left most set bit\n lm_pos = list(pattern).index('1')\n\n # find changed bit\n comp = [i != j for i, j in zip(pattern, last_pattern)]\n if True in comp:\n pos = comp.index(True)\n else:\n pos = None\n if pos is not None:\n if pos != lm_pos:\n circuit.cx(ctls[pos], ctls[lm_pos])\n else:\n indices = [i for i, x in enumerate(pattern) if x == '1']\n for idx in indices[1:]:\n circuit.cx(ctls[idx], ctls[lm_pos])\n # check parity and undo rotation\n if pattern.count('1') % 2 == 0:\n # inverse CU3: u3(theta, phi, lamb)^dagger = u3(-theta, -lam, -phi)\n _apply_cu3(circuit, -theta, -lam, -phi, ctls[lm_pos], tgt,\n use_basis_gates=use_basis_gates)\n else:\n _apply_cu3(circuit, theta, phi, lam, ctls[lm_pos], tgt,\n use_basis_gates=use_basis_gates)\n last_pattern = pattern\n\n\ndef mcrx(self, theta, q_controls, q_target, use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled X rotation gate\n\n Args:\n self (QuantumCircuit): The QuantumCircuit object to apply the mcrx gate on.\n theta (float): angle theta\n q_controls (list(Qubit)): The list of control qubits\n q_target (Qubit): The target qubit\n use_basis_gates (bool): use u1, u2, u3, cx, id\n\n Raises:\n QiskitError: parameter errors\n \"\"\"\n\n # check controls\n if isinstance(q_controls, QuantumRegister):\n control_qubits = list(q_controls)\n elif isinstance(q_controls, list):\n control_qubits = q_controls\n else:\n raise QiskitError(\n 'The mcrx gate needs a list of qubits or a quantum register for controls.')\n\n # check target\n if isinstance(q_target, Qubit):\n target_qubit = q_target\n else:\n raise QiskitError('The mcrx gate needs a single qubit as target.')\n\n all_qubits = control_qubits + [target_qubit]\n\n self._check_qargs(all_qubits)\n self._check_dups(all_qubits)\n\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n _apply_cu3(self, theta, -pi/2, pi/2, control_qubits[0],\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n theta_step = theta * (1 / (2 ** (n_c - 1)))\n _apply_mcu3_graycode(self, theta_step, -pi/2, pi/2, control_qubits,\n target_qubit, use_basis_gates=use_basis_gates)\n\n\ndef mcry(self, theta, q_controls, q_target, q_ancillae, mode='basic',\n use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled Y rotation gate\n\n Args:\n self (QuantumCircuit): The QuantumCircuit object to apply the mcry gate on.\n theta (float): angle theta\n q_controls (list(Qubit)): The list of control qubits\n q_target (Qubit): The 
target qubit\n q_ancillae (QuantumRegister or tuple(QuantumRegister, int)): The list of ancillary qubits.\n mode (string): The implementation mode to use\n use_basis_gates (bool): use u1, u2, u3, cx, id\n\n Raises:\n QiskitError: parameter errors\n \"\"\"\n\n # check controls\n if isinstance(q_controls, QuantumRegister):\n control_qubits = list(q_controls)\n elif isinstance(q_controls, list):\n control_qubits = q_controls\n else:\n raise QiskitError('The mcry gate needs a list of qubits or a quantum '\n 'register for controls.')\n\n # check target\n if isinstance(q_target, Qubit):\n target_qubit = q_target\n else:\n raise QiskitError('The mcry gate needs a single qubit as target.')\n\n # check ancilla\n if q_ancillae is None:\n ancillary_qubits = []\n elif isinstance(q_ancillae, QuantumRegister):\n ancillary_qubits = list(q_ancillae)\n elif isinstance(q_ancillae, list):\n ancillary_qubits = q_ancillae\n else:\n raise QiskitError('The mcry gate needs None or a list of qubits or a '\n 'quantum register for ancilla.')\n\n all_qubits = control_qubits + [target_qubit] + ancillary_qubits\n\n self._check_qargs(all_qubits)\n self._check_dups(all_qubits)\n\n if mode == 'basic':\n self.u3(theta / 2, 0, 0, q_target)\n self.mct(q_controls, q_target, q_ancillae)\n self.u3(-theta / 2, 0, 0, q_target)\n self.mct(q_controls, q_target, q_ancillae)\n elif mode == 'noancilla':\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n _apply_cu3(self, theta, 0, 0, control_qubits[0],\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n theta_step = theta * (1 / (2 ** (n_c - 1)))\n _apply_mcu3_graycode(self, theta_step, 0, 0, control_qubits,\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n raise QiskitError('Unrecognized mode for building MCRY circuit: {}.'.format(mode))\n\n\ndef mcrz(self, lam, q_controls, q_target, use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled Z rotation gate\n\n Args:\n self (QuantumCircuit): The QuantumCircuit object to apply the mcrz gate on.\n lam (float): angle lambda\n q_controls (list(Qubit)): The list of control qubits\n q_target (Qubit): The target qubit\n use_basis_gates (bool): use u1, u2, u3, cx, id\n\n Raises:\n QiskitError: parameter errors\n \"\"\"\n\n # check controls\n if isinstance(q_controls, QuantumRegister):\n control_qubits = list(q_controls)\n elif isinstance(q_controls, list):\n control_qubits = q_controls\n else:\n raise QiskitError(\n 'The mcrz gate needs a list of qubits or a quantum register for controls.')\n\n # check target\n if isinstance(q_target, Qubit):\n target_qubit = q_target\n else:\n raise QiskitError('The mcrz gate needs a single qubit as target.')\n\n all_qubits = control_qubits + [target_qubit]\n\n self._check_qargs(all_qubits)\n self._check_dups(all_qubits)\n\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n _apply_cu3(self, 0, 0, lam, control_qubits[0],\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n lam_step = lam * (1 / (2 ** (n_c - 1)))\n _apply_mcu3_graycode(self, 0, 0, lam_step, control_qubits,\n target_qubit, use_basis_gates=use_basis_gates)\n\n\nQuantumCircuit.mcrx = mcrx\nQuantumCircuit.mcry = mcry\nQuantumCircuit.mcrz = mcrz\n", "path": "qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nMultiple-Controlled U3 gate. Not using ancillary qubits.\n\"\"\"\n\nimport logging\nfrom math import pi\nfrom qiskit.circuit import QuantumCircuit, QuantumRegister, Qubit\nfrom qiskit.circuit.library.standard_gates.x import MCXGate\nfrom qiskit.circuit.library.standard_gates.u3 import _generate_gray_code\nfrom qiskit.exceptions import QiskitError\n\nlogger = logging.getLogger(__name__)\n\n\ndef _apply_cu3(circuit, theta, phi, lam, control, target, use_basis_gates=True):\n if use_basis_gates:\n circuit.u1((lam + phi) / 2, control)\n circuit.u1((lam - phi) / 2, target)\n circuit.cx(control, target)\n circuit.u3(-theta / 2, 0, -(phi + lam) / 2, target)\n circuit.cx(control, target)\n circuit.u3(theta / 2, phi, 0, target)\n else:\n circuit.cu(theta, phi, lam, 0, control, target)\n\n\ndef _apply_mcu3_graycode(circuit, theta, phi, lam, ctls, tgt, use_basis_gates):\n \"\"\"Apply multi-controlled u3 gate from ctls to tgt using graycode\n pattern with single-step angles theta, phi, lam.\"\"\"\n\n n = len(ctls)\n\n gray_code = _generate_gray_code(n)\n last_pattern = None\n\n for pattern in gray_code:\n if '1' not in pattern:\n continue\n if last_pattern is None:\n last_pattern = pattern\n # find left most set bit\n lm_pos = list(pattern).index('1')\n\n # find changed bit\n comp = [i != j for i, j in zip(pattern, last_pattern)]\n if True in comp:\n pos = comp.index(True)\n else:\n pos = None\n if pos is not None:\n if pos != lm_pos:\n circuit.cx(ctls[pos], ctls[lm_pos])\n else:\n indices = [i for i, x in enumerate(pattern) if x == '1']\n for idx in indices[1:]:\n circuit.cx(ctls[idx], ctls[lm_pos])\n # check parity and undo rotation\n if pattern.count('1') % 2 == 0:\n # inverse CU3: u3(theta, phi, lamb)^dagger = u3(-theta, -lam, -phi)\n _apply_cu3(circuit, -theta, -lam, -phi, ctls[lm_pos], tgt,\n use_basis_gates=use_basis_gates)\n else:\n _apply_cu3(circuit, theta, phi, lam, ctls[lm_pos], tgt,\n use_basis_gates=use_basis_gates)\n last_pattern = pattern\n\n\ndef mcrx(self, theta, q_controls, q_target, use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled X rotation gate\n\n Args:\n self (QuantumCircuit): The QuantumCircuit object to apply the mcrx gate on.\n theta (float): angle theta\n q_controls (list(Qubit)): The list of control qubits\n q_target (Qubit): The target qubit\n use_basis_gates (bool): use u1, u2, u3, cx, id\n\n Raises:\n QiskitError: parameter errors\n \"\"\"\n\n # check controls\n if isinstance(q_controls, QuantumRegister):\n control_qubits = list(q_controls)\n elif isinstance(q_controls, list):\n control_qubits = q_controls\n else:\n raise QiskitError(\n 'The mcrx gate needs a list of qubits or a quantum register for controls.')\n\n # check target\n if isinstance(q_target, Qubit):\n target_qubit = q_target\n else:\n raise QiskitError('The mcrx gate needs a single qubit as target.')\n\n all_qubits = control_qubits + [target_qubit]\n\n self._check_qargs(all_qubits)\n self._check_dups(all_qubits)\n\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n _apply_cu3(self, theta, -pi/2, pi/2, control_qubits[0],\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n theta_step = theta * (1 / (2 ** (n_c - 1)))\n 
_apply_mcu3_graycode(self, theta_step, -pi/2, pi/2, control_qubits,\n target_qubit, use_basis_gates=use_basis_gates)\n\n\ndef mcry(self, theta, q_controls, q_target, q_ancillae, mode=None,\n use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled Y rotation gate\n\n Args:\n self (QuantumCircuit): The QuantumCircuit object to apply the mcry gate on.\n theta (float): angle theta\n q_controls (list(Qubit)): The list of control qubits\n q_target (Qubit): The target qubit\n q_ancillae (QuantumRegister or tuple(QuantumRegister, int)): The list of ancillary qubits.\n mode (string): The implementation mode to use\n use_basis_gates (bool): use u1, u2, u3, cx, id\n\n Raises:\n QiskitError: parameter errors\n \"\"\"\n\n # check controls\n if isinstance(q_controls, QuantumRegister):\n control_qubits = list(q_controls)\n elif isinstance(q_controls, list):\n control_qubits = q_controls\n else:\n raise QiskitError('The mcry gate needs a list of qubits or a quantum '\n 'register for controls.')\n\n # check target\n if isinstance(q_target, Qubit):\n target_qubit = q_target\n else:\n raise QiskitError('The mcry gate needs a single qubit as target.')\n\n # check ancilla\n if q_ancillae is None:\n ancillary_qubits = []\n elif isinstance(q_ancillae, QuantumRegister):\n ancillary_qubits = list(q_ancillae)\n elif isinstance(q_ancillae, list):\n ancillary_qubits = q_ancillae\n else:\n raise QiskitError('The mcry gate needs None or a list of qubits or a '\n 'quantum register for ancilla.')\n\n all_qubits = control_qubits + [target_qubit] + ancillary_qubits\n\n self._check_qargs(all_qubits)\n self._check_dups(all_qubits)\n\n # auto-select the best mode\n if mode is None:\n # if enough ancillary qubits are provided, use the 'v-chain' method\n additional_vchain = MCXGate.get_num_ancilla_qubits(len(control_qubits), 'v-chain')\n if len(ancillary_qubits) >= additional_vchain:\n mode = 'basic'\n else:\n mode = 'noancilla'\n\n if mode == 'basic':\n self.ry(theta / 2, q_target)\n self.mcx(q_controls, q_target, q_ancillae, mode='v-chain')\n self.ry(-theta / 2, q_target)\n self.mcx(q_controls, q_target, q_ancillae, mode='v-chain')\n elif mode == 'noancilla':\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n _apply_cu3(self, theta, 0, 0, control_qubits[0],\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n theta_step = theta * (1 / (2 ** (n_c - 1)))\n _apply_mcu3_graycode(self, theta_step, 0, 0, control_qubits,\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n raise QiskitError('Unrecognized mode for building MCRY circuit: {}.'.format(mode))\n\n\ndef mcrz(self, lam, q_controls, q_target, use_basis_gates=False):\n \"\"\"\n Apply Multiple-Controlled Z rotation gate\n\n Args:\n self (QuantumCircuit): The QuantumCircuit object to apply the mcrz gate on.\n lam (float): angle lambda\n q_controls (list(Qubit)): The list of control qubits\n q_target (Qubit): The target qubit\n use_basis_gates (bool): use u1, u2, u3, cx, id\n\n Raises:\n QiskitError: parameter errors\n \"\"\"\n\n # check controls\n if isinstance(q_controls, QuantumRegister):\n control_qubits = list(q_controls)\n elif isinstance(q_controls, list):\n control_qubits = q_controls\n else:\n raise QiskitError(\n 'The mcrz gate needs a list of qubits or a quantum register for controls.')\n\n # check target\n if isinstance(q_target, Qubit):\n target_qubit = q_target\n else:\n raise QiskitError('The mcrz gate needs a single qubit as target.')\n\n all_qubits = control_qubits + [target_qubit]\n\n self._check_qargs(all_qubits)\n 
self._check_dups(all_qubits)\n\n n_c = len(control_qubits)\n if n_c == 1: # cu3\n _apply_cu3(self, 0, 0, lam, control_qubits[0],\n target_qubit, use_basis_gates=use_basis_gates)\n else:\n lam_step = lam * (1 / (2 ** (n_c - 1)))\n _apply_mcu3_graycode(self, 0, 0, lam_step, control_qubits,\n target_qubit, use_basis_gates=use_basis_gates)\n\n\nQuantumCircuit.mcrx = mcrx\nQuantumCircuit.mcry = mcry\nQuantumCircuit.mcrz = mcrz\n", "path": "qiskit/circuit/library/standard_gates/multi_control_rotation_gates.py"}]}
| 3,313 | 673 |
gh_patches_debug_38758 | rasdani/github-patches | git_diff | python-discord__site-1104 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider dropping deploy preview support for redirects app
Do we need previews of the legacy redirects?
If not, we may be able to remove a lot of code from the redirects app.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/apps/redirect/urls.py`
Content:
```
1 import dataclasses
2 import re
3
4 import yaml
5 from django import conf
6 from django.http import HttpResponse
7 from django.urls import URLPattern, path
8 from django_distill import distill_path
9
10 from pydis_site import settings
11 from pydis_site.apps.content import urls as pages_urls
12 from pydis_site.apps.redirect.views import CustomRedirectView
13 from pydis_site.apps.resources import urls as resources_urls
14
15 app_name = "redirect"
16
17
18 __PARAMETER_REGEX = re.compile(r"<\w+:\w+>")
19 REDIRECT_TEMPLATE = "<meta http-equiv=\"refresh\" content=\"0; URL={url}\"/>"
20
21
22 @dataclasses.dataclass(frozen=True)
23 class Redirect:
24 """Metadata about a redirect route."""
25
26 original_path: str
27 redirect_route: str
28 redirect_arguments: tuple[str] = tuple()
29
30 prefix_redirect: bool = False
31
32
33 def map_redirect(name: str, data: Redirect) -> list[URLPattern]:
34 """Return a pattern using the Redirects app, or a static HTML redirect for static builds."""
35 if not settings.STATIC_BUILD:
36 # Normal dynamic redirect
37 return [path(
38 data.original_path,
39 CustomRedirectView.as_view(
40 pattern_name=data.redirect_route,
41 static_args=tuple(data.redirect_arguments),
42 prefix_redirect=data.prefix_redirect
43 ),
44 name=name
45 )]
46
47 # Create static HTML redirects for static builds
48 new_app_name = data.redirect_route.split(":")[0]
49
50 if __PARAMETER_REGEX.search(data.original_path):
51 # Redirects for paths which accept parameters
52 # We generate an HTML redirect file for all possible entries
53 paths = []
54
55 class RedirectFunc:
56 def __init__(self, new_url: str, _name: str):
57 self.result = HttpResponse(REDIRECT_TEMPLATE.format(url=new_url))
58 self.__qualname__ = _name
59
60 def __call__(self, *args, **kwargs):
61 return self.result
62
63 if new_app_name == resources_urls.app_name:
64 items = resources_urls.get_all_resources()
65 elif new_app_name == pages_urls.app_name:
66 items = pages_urls.get_all_pages()
67 else:
68 raise ValueError(f"Unknown app in redirect: {new_app_name}")
69
70 for item in items:
71 entry = next(iter(item.values()))
72
73 # Replace dynamic redirect with concrete path
74 concrete_path = __PARAMETER_REGEX.sub(entry, data.original_path)
75 new_redirect = f"/{new_app_name}/{entry}"
76 pattern_name = f"{name}_{entry}"
77
78 paths.append(distill_path(
79 concrete_path,
80 RedirectFunc(new_redirect, pattern_name),
81 name=pattern_name
82 ))
83
84 return paths
85
86 redirect_path_name = "pages" if new_app_name == "content" else new_app_name
87 if len(data.redirect_arguments) > 0:
88 redirect_arg = data.redirect_arguments[0]
89 else:
90 redirect_arg = "resources/"
91 new_redirect = f"/{redirect_path_name}/{redirect_arg}"
92
93 if new_redirect == "/resources/resources/":
94 new_redirect = "/resources/"
95
96 return [distill_path(
97 data.original_path,
98 lambda *args: HttpResponse(REDIRECT_TEMPLATE.format(url=new_redirect)),
99 name=name,
100 )]
101
102
103 urlpatterns = []
104 for _name, _data in yaml.safe_load(conf.settings.REDIRECTIONS_PATH.read_text()).items():
105 urlpatterns.extend(map_redirect(_name, Redirect(**_data)))
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pydis_site/apps/redirect/urls.py b/pydis_site/apps/redirect/urls.py
--- a/pydis_site/apps/redirect/urls.py
+++ b/pydis_site/apps/redirect/urls.py
@@ -3,14 +3,9 @@
import yaml
from django import conf
-from django.http import HttpResponse
from django.urls import URLPattern, path
-from django_distill import distill_path
-from pydis_site import settings
-from pydis_site.apps.content import urls as pages_urls
from pydis_site.apps.redirect.views import CustomRedirectView
-from pydis_site.apps.resources import urls as resources_urls
app_name = "redirect"
@@ -31,72 +26,15 @@
def map_redirect(name: str, data: Redirect) -> list[URLPattern]:
- """Return a pattern using the Redirects app, or a static HTML redirect for static builds."""
- if not settings.STATIC_BUILD:
- # Normal dynamic redirect
- return [path(
- data.original_path,
- CustomRedirectView.as_view(
- pattern_name=data.redirect_route,
- static_args=tuple(data.redirect_arguments),
- prefix_redirect=data.prefix_redirect
- ),
- name=name
- )]
-
- # Create static HTML redirects for static builds
- new_app_name = data.redirect_route.split(":")[0]
-
- if __PARAMETER_REGEX.search(data.original_path):
- # Redirects for paths which accept parameters
- # We generate an HTML redirect file for all possible entries
- paths = []
-
- class RedirectFunc:
- def __init__(self, new_url: str, _name: str):
- self.result = HttpResponse(REDIRECT_TEMPLATE.format(url=new_url))
- self.__qualname__ = _name
-
- def __call__(self, *args, **kwargs):
- return self.result
-
- if new_app_name == resources_urls.app_name:
- items = resources_urls.get_all_resources()
- elif new_app_name == pages_urls.app_name:
- items = pages_urls.get_all_pages()
- else:
- raise ValueError(f"Unknown app in redirect: {new_app_name}")
-
- for item in items:
- entry = next(iter(item.values()))
-
- # Replace dynamic redirect with concrete path
- concrete_path = __PARAMETER_REGEX.sub(entry, data.original_path)
- new_redirect = f"/{new_app_name}/{entry}"
- pattern_name = f"{name}_{entry}"
-
- paths.append(distill_path(
- concrete_path,
- RedirectFunc(new_redirect, pattern_name),
- name=pattern_name
- ))
-
- return paths
-
- redirect_path_name = "pages" if new_app_name == "content" else new_app_name
- if len(data.redirect_arguments) > 0:
- redirect_arg = data.redirect_arguments[0]
- else:
- redirect_arg = "resources/"
- new_redirect = f"/{redirect_path_name}/{redirect_arg}"
-
- if new_redirect == "/resources/resources/":
- new_redirect = "/resources/"
-
- return [distill_path(
+ """Return a pattern using the Redirects app."""
+ return [path(
data.original_path,
- lambda *args: HttpResponse(REDIRECT_TEMPLATE.format(url=new_redirect)),
- name=name,
+ CustomRedirectView.as_view(
+ pattern_name=data.redirect_route,
+ static_args=tuple(data.redirect_arguments),
+ prefix_redirect=data.prefix_redirect
+ ),
+ name=name
)]
|
{"golden_diff": "diff --git a/pydis_site/apps/redirect/urls.py b/pydis_site/apps/redirect/urls.py\n--- a/pydis_site/apps/redirect/urls.py\n+++ b/pydis_site/apps/redirect/urls.py\n@@ -3,14 +3,9 @@\n \n import yaml\n from django import conf\n-from django.http import HttpResponse\n from django.urls import URLPattern, path\n-from django_distill import distill_path\n \n-from pydis_site import settings\n-from pydis_site.apps.content import urls as pages_urls\n from pydis_site.apps.redirect.views import CustomRedirectView\n-from pydis_site.apps.resources import urls as resources_urls\n \n app_name = \"redirect\"\n \n@@ -31,72 +26,15 @@\n \n \n def map_redirect(name: str, data: Redirect) -> list[URLPattern]:\n- \"\"\"Return a pattern using the Redirects app, or a static HTML redirect for static builds.\"\"\"\n- if not settings.STATIC_BUILD:\n- # Normal dynamic redirect\n- return [path(\n- data.original_path,\n- CustomRedirectView.as_view(\n- pattern_name=data.redirect_route,\n- static_args=tuple(data.redirect_arguments),\n- prefix_redirect=data.prefix_redirect\n- ),\n- name=name\n- )]\n-\n- # Create static HTML redirects for static builds\n- new_app_name = data.redirect_route.split(\":\")[0]\n-\n- if __PARAMETER_REGEX.search(data.original_path):\n- # Redirects for paths which accept parameters\n- # We generate an HTML redirect file for all possible entries\n- paths = []\n-\n- class RedirectFunc:\n- def __init__(self, new_url: str, _name: str):\n- self.result = HttpResponse(REDIRECT_TEMPLATE.format(url=new_url))\n- self.__qualname__ = _name\n-\n- def __call__(self, *args, **kwargs):\n- return self.result\n-\n- if new_app_name == resources_urls.app_name:\n- items = resources_urls.get_all_resources()\n- elif new_app_name == pages_urls.app_name:\n- items = pages_urls.get_all_pages()\n- else:\n- raise ValueError(f\"Unknown app in redirect: {new_app_name}\")\n-\n- for item in items:\n- entry = next(iter(item.values()))\n-\n- # Replace dynamic redirect with concrete path\n- concrete_path = __PARAMETER_REGEX.sub(entry, data.original_path)\n- new_redirect = f\"/{new_app_name}/{entry}\"\n- pattern_name = f\"{name}_{entry}\"\n-\n- paths.append(distill_path(\n- concrete_path,\n- RedirectFunc(new_redirect, pattern_name),\n- name=pattern_name\n- ))\n-\n- return paths\n-\n- redirect_path_name = \"pages\" if new_app_name == \"content\" else new_app_name\n- if len(data.redirect_arguments) > 0:\n- redirect_arg = data.redirect_arguments[0]\n- else:\n- redirect_arg = \"resources/\"\n- new_redirect = f\"/{redirect_path_name}/{redirect_arg}\"\n-\n- if new_redirect == \"/resources/resources/\":\n- new_redirect = \"/resources/\"\n-\n- return [distill_path(\n+ \"\"\"Return a pattern using the Redirects app.\"\"\"\n+ return [path(\n data.original_path,\n- lambda *args: HttpResponse(REDIRECT_TEMPLATE.format(url=new_redirect)),\n- name=name,\n+ CustomRedirectView.as_view(\n+ pattern_name=data.redirect_route,\n+ static_args=tuple(data.redirect_arguments),\n+ prefix_redirect=data.prefix_redirect\n+ ),\n+ name=name\n )]\n", "issue": "Consider dropping deploy preview support for redirects app\nDo we need previews of the legacy redirects?\n\nIf not, we may be able to remove a lot of code from the redirects app.\n", "before_files": [{"content": "import dataclasses\nimport re\n\nimport yaml\nfrom django import conf\nfrom django.http import HttpResponse\nfrom django.urls import URLPattern, path\nfrom django_distill import distill_path\n\nfrom pydis_site import settings\nfrom pydis_site.apps.content import urls as pages_urls\nfrom 
pydis_site.apps.redirect.views import CustomRedirectView\nfrom pydis_site.apps.resources import urls as resources_urls\n\napp_name = \"redirect\"\n\n\n__PARAMETER_REGEX = re.compile(r\"<\\w+:\\w+>\")\nREDIRECT_TEMPLATE = \"<meta http-equiv=\\\"refresh\\\" content=\\\"0; URL={url}\\\"/>\"\n\n\[email protected](frozen=True)\nclass Redirect:\n \"\"\"Metadata about a redirect route.\"\"\"\n\n original_path: str\n redirect_route: str\n redirect_arguments: tuple[str] = tuple()\n\n prefix_redirect: bool = False\n\n\ndef map_redirect(name: str, data: Redirect) -> list[URLPattern]:\n \"\"\"Return a pattern using the Redirects app, or a static HTML redirect for static builds.\"\"\"\n if not settings.STATIC_BUILD:\n # Normal dynamic redirect\n return [path(\n data.original_path,\n CustomRedirectView.as_view(\n pattern_name=data.redirect_route,\n static_args=tuple(data.redirect_arguments),\n prefix_redirect=data.prefix_redirect\n ),\n name=name\n )]\n\n # Create static HTML redirects for static builds\n new_app_name = data.redirect_route.split(\":\")[0]\n\n if __PARAMETER_REGEX.search(data.original_path):\n # Redirects for paths which accept parameters\n # We generate an HTML redirect file for all possible entries\n paths = []\n\n class RedirectFunc:\n def __init__(self, new_url: str, _name: str):\n self.result = HttpResponse(REDIRECT_TEMPLATE.format(url=new_url))\n self.__qualname__ = _name\n\n def __call__(self, *args, **kwargs):\n return self.result\n\n if new_app_name == resources_urls.app_name:\n items = resources_urls.get_all_resources()\n elif new_app_name == pages_urls.app_name:\n items = pages_urls.get_all_pages()\n else:\n raise ValueError(f\"Unknown app in redirect: {new_app_name}\")\n\n for item in items:\n entry = next(iter(item.values()))\n\n # Replace dynamic redirect with concrete path\n concrete_path = __PARAMETER_REGEX.sub(entry, data.original_path)\n new_redirect = f\"/{new_app_name}/{entry}\"\n pattern_name = f\"{name}_{entry}\"\n\n paths.append(distill_path(\n concrete_path,\n RedirectFunc(new_redirect, pattern_name),\n name=pattern_name\n ))\n\n return paths\n\n redirect_path_name = \"pages\" if new_app_name == \"content\" else new_app_name\n if len(data.redirect_arguments) > 0:\n redirect_arg = data.redirect_arguments[0]\n else:\n redirect_arg = \"resources/\"\n new_redirect = f\"/{redirect_path_name}/{redirect_arg}\"\n\n if new_redirect == \"/resources/resources/\":\n new_redirect = \"/resources/\"\n\n return [distill_path(\n data.original_path,\n lambda *args: HttpResponse(REDIRECT_TEMPLATE.format(url=new_redirect)),\n name=name,\n )]\n\n\nurlpatterns = []\nfor _name, _data in yaml.safe_load(conf.settings.REDIRECTIONS_PATH.read_text()).items():\n urlpatterns.extend(map_redirect(_name, Redirect(**_data)))\n", "path": "pydis_site/apps/redirect/urls.py"}], "after_files": [{"content": "import dataclasses\nimport re\n\nimport yaml\nfrom django import conf\nfrom django.urls import URLPattern, path\n\nfrom pydis_site.apps.redirect.views import CustomRedirectView\n\napp_name = \"redirect\"\n\n\n__PARAMETER_REGEX = re.compile(r\"<\\w+:\\w+>\")\nREDIRECT_TEMPLATE = \"<meta http-equiv=\\\"refresh\\\" content=\\\"0; URL={url}\\\"/>\"\n\n\[email protected](frozen=True)\nclass Redirect:\n \"\"\"Metadata about a redirect route.\"\"\"\n\n original_path: str\n redirect_route: str\n redirect_arguments: tuple[str] = tuple()\n\n prefix_redirect: bool = False\n\n\ndef map_redirect(name: str, data: Redirect) -> list[URLPattern]:\n \"\"\"Return a pattern using the Redirects app.\"\"\"\n return 
[path(\n data.original_path,\n CustomRedirectView.as_view(\n pattern_name=data.redirect_route,\n static_args=tuple(data.redirect_arguments),\n prefix_redirect=data.prefix_redirect\n ),\n name=name\n )]\n\n\nurlpatterns = []\nfor _name, _data in yaml.safe_load(conf.settings.REDIRECTIONS_PATH.read_text()).items():\n urlpatterns.extend(map_redirect(_name, Redirect(**_data)))\n", "path": "pydis_site/apps/redirect/urls.py"}]}
| 1,237 | 773 |
gh_patches_debug_7175 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2743 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove defunct entry_points
These scripts no longer exist. We should remove the entry_points.
* [insights.tools.generate_api_config](https://github.com/RedHatInsights/insights-core/blob/master/setup.py#L23)
* [insights.tools.perf](https://github.com/RedHatInsights/insights-core/blob/master/setup.py#L24)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3 from setuptools import setup, find_packages
4
5 __here__ = os.path.dirname(os.path.abspath(__file__))
6
7 package_info = dict.fromkeys(["RELEASE", "COMMIT", "VERSION", "NAME"])
8
9 for name in package_info:
10 with open(os.path.join(__here__, "insights", name)) as f:
11 package_info[name] = f.read().strip()
12
13 entry_points = {
14 'console_scripts': [
15 'insights-collect = insights.collect:main',
16 'insights-run = insights:main',
17 'insights = insights.command_parser:main',
18 'insights-cat = insights.tools.cat:main',
19 'insights-dupkeycheck = insights.tools.dupkeycheck:main',
20 'insights-inspect = insights.tools.insights_inspect:main',
21 'insights-info = insights.tools.query:main',
22 'insights-ocpshell= insights.ocpshell:main',
23 'gen_api = insights.tools.generate_api_config:main',
24 'insights-perf = insights.tools.perf:main',
25 'client = insights.client:run',
26 'mangle = insights.util.mangle:main'
27 ]
28 }
29
30 runtime = set([
31 'six',
32 'requests',
33 'redis',
34 'cachecontrol',
35 'cachecontrol[redis]',
36 'cachecontrol[filecache]',
37 'defusedxml',
38 'lockfile',
39 'jinja2',
40 ])
41
42 if (sys.version_info < (2, 7)):
43 runtime.add('pyyaml>=3.10,<=3.13')
44 else:
45 runtime.add('pyyaml')
46
47
48 def maybe_require(pkg):
49 try:
50 __import__(pkg)
51 except ImportError:
52 runtime.add(pkg)
53
54
55 maybe_require("importlib")
56 maybe_require("argparse")
57
58
59 client = set([
60 'requests'
61 ])
62
63 develop = set([
64 'futures==3.0.5',
65 'wheel',
66 ])
67
68 docs = set([
69 'Sphinx<=3.0.2',
70 'nbsphinx',
71 'sphinx_rtd_theme',
72 'ipython',
73 'colorama',
74 'jinja2',
75 'Pygments'
76 ])
77
78 testing = set([
79 'coverage==4.3.4',
80 'pytest==3.0.6',
81 'pytest-cov==2.4.0',
82 'mock==2.0.0',
83 ])
84
85 cluster = set([
86 'ansible',
87 'pandas',
88 'colorama',
89 ])
90
91 openshift = set([
92 'openshift'
93 ])
94
95 linting = set([
96 'flake8==2.6.2',
97 ])
98
99 optional = set([
100 'python-cjson',
101 'python-logstash',
102 'python-statsd',
103 'watchdog',
104 ])
105
106 if __name__ == "__main__":
107 # allows for runtime modification of rpm name
108 name = os.environ.get("INSIGHTS_CORE_NAME", package_info["NAME"])
109
110 setup(
111 name=name,
112 version=package_info["VERSION"],
113 description="Insights Core is a data collection and analysis framework",
114 long_description=open("README.rst").read(),
115 url="https://github.com/redhatinsights/insights-core",
116 author="Red Hat, Inc.",
117 author_email="[email protected]",
118 packages=find_packages(),
119 install_requires=list(runtime),
120 package_data={'': ['LICENSE']},
121 license='Apache 2.0',
122 extras_require={
123 'develop': list(runtime | develop | client | docs | linting | testing | cluster),
124 'develop26': list(runtime | develop | client | linting | testing | cluster),
125 'client': list(runtime | client),
126 'client-develop': list(runtime | develop | client | linting | testing),
127 'cluster': list(runtime | cluster),
128 'openshift': list(runtime | openshift),
129 'optional': list(optional),
130 'docs': list(docs),
131 'linting': list(linting | client),
132 'testing': list(testing | client)
133 },
134 classifiers=[
135 'Development Status :: 5 - Production/Stable',
136 'Intended Audience :: Developers',
137 'Natural Language :: English',
138 'License :: OSI Approved :: Apache Software License',
139 'Programming Language :: Python',
140 'Programming Language :: Python :: 2.6',
141 'Programming Language :: Python :: 2.7',
142 'Programming Language :: Python :: 3.3',
143 'Programming Language :: Python :: 3.4',
144 'Programming Language :: Python :: 3.5',
145 'Programming Language :: Python :: 3.6'
146 ],
147 entry_points=entry_points,
148 include_package_data=True
149 )
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,8 +20,6 @@
'insights-inspect = insights.tools.insights_inspect:main',
'insights-info = insights.tools.query:main',
'insights-ocpshell= insights.ocpshell:main',
- 'gen_api = insights.tools.generate_api_config:main',
- 'insights-perf = insights.tools.perf:main',
'client = insights.client:run',
'mangle = insights.util.mangle:main'
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,8 +20,6 @@\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n- 'gen_api = insights.tools.generate_api_config:main',\n- 'insights-perf = insights.tools.perf:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n", "issue": "Remove defunct entry_points\nThese scripts no longer exist. We should remove the entry_points.\r\n\r\n* [insights.tools.generate_api_config](https://github.com/RedHatInsights/insights-core/blob/master/setup.py#L23)\r\n* [insights.tools.perf](https://github.com/RedHatInsights/insights-core/blob/master/setup.py#L24)\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'gen_api = insights.tools.generate_api_config:main',\n 'insights-perf = insights.tools.perf:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2',\n])\n\nif (sys.version_info < (2, 7)):\n runtime.add('pyyaml>=3.10,<=3.13')\nelse:\n runtime.add('pyyaml')\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests'\n])\n\ndevelop = set([\n 'futures==3.0.5',\n 'wheel',\n])\n\ndocs = set([\n 'Sphinx<=3.0.2',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'jinja2',\n 'Pygments'\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2',\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 
'client-develop': list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2',\n])\n\nif (sys.version_info < (2, 7)):\n runtime.add('pyyaml>=3.10,<=3.13')\nelse:\n runtime.add('pyyaml')\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests'\n])\n\ndevelop = set([\n 'futures==3.0.5',\n 'wheel',\n])\n\ndocs = set([\n 'Sphinx<=3.0.2',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'jinja2',\n 'Pygments'\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2',\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 'client-develop': 
list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}]}
| 1,680 | 128 |
gh_patches_debug_26911 | rasdani/github-patches | git_diff | pyca__cryptography-1424 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need zope.interface.verify.verifyObject for ABCs.
Every object that claims to provide some interface should then use it in a unit test.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/utils.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import sys
17
18
19 DeprecatedIn06 = DeprecationWarning
20
21
22 def register_interface(iface):
23 def register_decorator(klass):
24 iface.register(klass)
25 return klass
26 return register_decorator
27
28
29 def read_only_property(name):
30 return property(lambda self: getattr(self, name))
31
32
33 def bit_length(x):
34 if sys.version_info >= (2, 7):
35 return x.bit_length()
36 else:
37 return len(bin(x)) - (2 + (x <= 0))
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cryptography/utils.py b/cryptography/utils.py
--- a/cryptography/utils.py
+++ b/cryptography/utils.py
@@ -13,6 +13,8 @@
from __future__ import absolute_import, division, print_function
+import abc
+import inspect
import sys
@@ -21,6 +23,7 @@
def register_interface(iface):
def register_decorator(klass):
+ verify_interface(iface, klass)
iface.register(klass)
return klass
return register_decorator
@@ -30,6 +33,30 @@
return property(lambda self: getattr(self, name))
+class InterfaceNotImplemented(Exception):
+ pass
+
+
+def verify_interface(iface, klass):
+ for method in iface.__abstractmethods__:
+ if not hasattr(klass, method):
+ raise InterfaceNotImplemented(
+ "{0} is missing a {1!r} method".format(klass, method)
+ )
+ if isinstance(getattr(iface, method), abc.abstractproperty):
+ # Can't properly verify these yet.
+ continue
+ spec = inspect.getargspec(getattr(iface, method))
+ actual = inspect.getargspec(getattr(klass, method))
+ if spec != actual:
+ raise InterfaceNotImplemented(
+ "{0}.{1}'s signature differs from the expected. Expected: "
+ "{2!r}. Received: {3!r}".format(
+ klass, method, spec, actual
+ )
+ )
+
+
def bit_length(x):
if sys.version_info >= (2, 7):
return x.bit_length()
|
{"golden_diff": "diff --git a/cryptography/utils.py b/cryptography/utils.py\n--- a/cryptography/utils.py\n+++ b/cryptography/utils.py\n@@ -13,6 +13,8 @@\n \n from __future__ import absolute_import, division, print_function\n \n+import abc\n+import inspect\n import sys\n \n \n@@ -21,6 +23,7 @@\n \n def register_interface(iface):\n def register_decorator(klass):\n+ verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n@@ -30,6 +33,30 @@\n return property(lambda self: getattr(self, name))\n \n \n+class InterfaceNotImplemented(Exception):\n+ pass\n+\n+\n+def verify_interface(iface, klass):\n+ for method in iface.__abstractmethods__:\n+ if not hasattr(klass, method):\n+ raise InterfaceNotImplemented(\n+ \"{0} is missing a {1!r} method\".format(klass, method)\n+ )\n+ if isinstance(getattr(iface, method), abc.abstractproperty):\n+ # Can't properly verify these yet.\n+ continue\n+ spec = inspect.getargspec(getattr(iface, method))\n+ actual = inspect.getargspec(getattr(klass, method))\n+ if spec != actual:\n+ raise InterfaceNotImplemented(\n+ \"{0}.{1}'s signature differs from the expected. Expected: \"\n+ \"{2!r}. Received: {3!r}\".format(\n+ klass, method, spec, actual\n+ )\n+ )\n+\n+\n def bit_length(x):\n if sys.version_info >= (2, 7):\n return x.bit_length()\n", "issue": "Need zope.interface.verify.verifyObject for ABCs.\nEvery object that claims to provide some interface should then use it in a unit test.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\n\nDeprecatedIn06 = DeprecationWarning\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef bit_length(x):\n if sys.version_info >= (2, 7):\n return x.bit_length()\n else:\n return len(bin(x)) - (2 + (x <= 0))\n", "path": "cryptography/utils.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport inspect\nimport sys\n\n\nDeprecatedIn06 = DeprecationWarning\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef read_only_property(name):\n return property(lambda self: 
getattr(self, name))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n spec = inspect.getargspec(getattr(iface, method))\n actual = inspect.getargspec(getattr(klass, method))\n if spec != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. Received: {3!r}\".format(\n klass, method, spec, actual\n )\n )\n\n\ndef bit_length(x):\n if sys.version_info >= (2, 7):\n return x.bit_length()\n else:\n return len(bin(x)) - (2 + (x <= 0))\n", "path": "cryptography/utils.py"}]}
| 595 | 357 |
gh_patches_debug_23901 | rasdani/github-patches | git_diff | ultrabug__py3status-946 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GitHub: Show correct count of unread notifications
If you have more than 50 unread notifications in total, and you want to see that total notification count, the GitHub module still only shows 50 due to pagination:
https://github.com/ultrabug/py3status/blob/master/py3status/modules/github.py#L155
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/github.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Display Github notifications and issue/pull requests for a repo.
4
5 To check notifications a Github `username` and `personal access token` are
6 required. You can create a personal access token at
7 https://github.com/settings/tokens The only `scope` needed is `notifications`,
8 which provides readonly access to notifications.
9
10 The Github API is rate limited so setting `cache_timeout` too small may cause
11 issues see https://developer.github.com/v3/#rate-limiting for details
12
13 Configuration parameters:
14 auth_token: Github personal access token, needed to check notifications
15 see above.
16 (default None)
17 button_action: Button that when clicked opens the Github notification page
18 if notifications, else the project page for the repository if there is
19 one (otherwise the github home page). Setting to `None` disables.
20 (default 3)
21 button_refresh: Button that when clicked refreshes module.
22 Setting to `None` disables.
23 (default 2)
24 cache_timeout: How often we refresh this module in seconds
25 (default 60)
26 format: Format of output
27 *(default '{repo} {issues}/{pull_requests}{notifications}'
28 if username and auth_token provided else
29 '{repo} {issues}/{pull_requests}')*
30 format_notifications: Format of `{notification}` status placeholder.
31 (default ' N{notifications_count}')
32 notifications: Type of notifications can be `all` for all notifications or
33 `repo` to only get notifications for the repo specified. If repo is
34 not provided then all notifications will be checked.
35 (default 'all')
36 repo: Github repo to check
37 (default 'ultrabug/py3status')
38 username: Github username, needed to check notifications.
39 (default None)
40
41 Format placeholders:
42 {issues} Number of open issues.
43 {notifications} Notifications. If no notifications this will be empty.
44 {notifications_count} Number of notifications. This is also the __Only__
45 placeholder available to `format_notifications`.
46 {pull_requests} Number of open pull requests
47 {repo} short name of the repository being checked. eg py3status
48 {repo_full} full name of the repository being checked. eg ultrabug/py3status
49
50 Examples:
51 ```
52 # set github access credentials
53 github {
54 auth_token = '40_char_hex_access_token'
55 username = 'my_username'
56 }
57
58 # just check for any notifications
59 github {
60 auth_token = '40_char_hex_access_token'
61 username = 'my_username'
62 format = 'Github {notifications_count}'
63 }
64 ```
65
66 @author tobes
67
68 SAMPLE OUTPUT
69 {'full_text': 'py3status 34/24'}
70
71 notification
72 {'full_text': 'py3status 34/24 N3', 'urgent': True}
73 """
74
75 GITHUB_API_URL = 'https://api.github.com'
76 GITHUB_URL = 'https://github.com/'
77
78
79 class Py3status:
80 auth_token = None
81 button_action = 3
82 button_refresh = 2
83 cache_timeout = 60
84 format = None
85 format_notifications = ' N{notifications_count}'
86 notifications = 'all'
87 repo = 'ultrabug/py3status'
88 username = None
89
90 def post_config_hook(self):
91 self.first = True
92 self.notification_warning = False
93 self.repo_warning = False
94 self._issues = '?'
95 self._pulls = '?'
96 self._notify = '?'
97
98 def _init(self):
99 # Set format if user has not configured it.
100 if not self.format:
101 if self.username and self.auth_token:
102 # include notifications
103 self.format = '{repo} {issues}/{pull_requests}{notifications}'
104 else:
105 self.format = '{repo} {issues}/{pull_requests}'
106
107 def _github_count(self, url):
108 """
109 Get counts for requests that return 'total_count' in the json response.
110 """
111 if self.first:
112 return '?'
113 url = GITHUB_API_URL + url + '&per_page=1'
114 # if we have authentication details use them as we get better
115 # rate-limiting.
116 if self.username and self.auth_token:
117 auth = (self.username, self.auth_token)
118 else:
119 auth = None
120 try:
121 info = self.py3.request(url, timeout=10, auth=auth)
122 except (self.py3.RequestException):
123 return
124 if info and info.status_code == 200:
125 return(int(info.json()['total_count']))
126 if info.status_code == 422:
127 if not self.repo_warning:
128 self.py3.notify_user('Github repo cannot be found.')
129 self.repo_warning = True
130 return '?'
131
132 def _notifications(self):
133 """
134 Get the number of unread notifications.
135 """
136 if not self.username or not self.auth_token:
137 if not self.notification_warning:
138 self.py3.notify_user('Github module needs username and '
139 'auth_token to check notifications.')
140 self.notification_warning = True
141 return '?'
142 if self.first:
143 return '?'
144 if self.notifications == 'all' or not self.repo:
145 url = GITHUB_API_URL + '/notifications'
146 else:
147 url = GITHUB_API_URL + '/repos/' + self.repo + '/notifications'
148 url += '?per_page=100'
149 try:
150 info = self.py3.request(url, timeout=10,
151 auth=(self.username, self.auth_token))
152 except (self.py3.RequestException):
153 return
154 if info.status_code == 200:
155 return len(info.json())
156 if info.status_code == 404:
157 if not self.repo_warning:
158 self.py3.notify_user('Github repo cannot be found.')
159 self.repo_warning = True
160
161 def github(self):
162 if self.first:
163 self._init()
164 status = {}
165 urgent = False
166 # issues
167 if self.repo and self.py3.format_contains(self.format, 'issues'):
168 url = '/search/issues?q=state:open+type:issue+repo:' + self.repo
169 self._issues = self._github_count(url) or self._issues
170 status['issues'] = self._issues
171 # pull requests
172 if self.repo and self.py3.format_contains(self.format, 'pull_requests'):
173 url = '/search/issues?q=state:open+type:pr+repo:' + self.repo
174 self._pulls = self._github_count(url) or self._pulls
175 status['pull_requests'] = self._pulls
176 # notifications
177 if (self.py3.format_contains(self.format, 'notifications') or
178 self.py3.format_contains(self.format, 'notifications_count')):
179 count = self._notifications()
180 # if we don't have a notification count, then use the last value
181 # that we did have.
182 if count is None:
183 count = self._notify
184 self._notify = count
185 if count and count != '?':
186 notify = self.py3.safe_format(
187 self.format_notifications,
188 {'notifications_count': count})
189 urgent = True
190 else:
191 notify = ''
192 status['notifications'] = notify
193 status['notifications_count'] = count
194 # repo
195 try:
196 status['repo'] = self.repo.split('/')[1]
197 except IndexError:
198 status['repo'] = 'Error'
199 status['repo_full'] = self.repo
200
201 if self.first:
202 cached_until = 0
203 self.first = False
204 else:
205 cached_until = self.py3.time_in(self.cache_timeout)
206
207 return {
208 'full_text': self.py3.safe_format(self.format, status),
209 'cached_until': cached_until,
210 'urgent': urgent
211 }
212
213 def on_click(self, event):
214 button = event['button']
215 if button == self.button_action:
216 # open github in browser
217 if self._notify and self._notify != '?':
218 # open github notifications page
219 url = GITHUB_URL + 'notifications'
220 else:
221 if self.notifications == 'all' and not self.repo:
222 # open github.com if there are no unread notifications and no repo
223 url = GITHUB_URL
224 else:
225 # open repo page if there are no unread notifications
226 url = GITHUB_URL + self.repo
227 # open url in default browser
228 self.py3.command_run('xdg-open {}'.format(url))
229 self.py3.prevent_refresh()
230 elif button != self.button_refresh:
231 # only refresh the module if needed
232 self.py3.prevent_refresh()
233
234
235 if __name__ == "__main__":
236 """
237 Run module in test mode.
238 """
239 from py3status.module_test import module_test
240 module_test(Py3status)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/py3status/modules/github.py b/py3status/modules/github.py
--- a/py3status/modules/github.py
+++ b/py3status/modules/github.py
@@ -152,7 +152,32 @@
except (self.py3.RequestException):
return
if info.status_code == 200:
- return len(info.json())
+ links = info._response.headers.get('Link', '').split(', ')
+ if not links:
+ return len(info.json())
+
+ last_page = 1
+ for link in links:
+ if 'rel="last"' in link:
+ import sys
+ if sys.version_info[0] == 2:
+ import urlparse
+ else:
+ import urllib.parse as urlparse
+ last_url = link[link.find('<') + 1:link.find('>')]
+ parsed = urlparse.urlparse(last_url)
+ last_page = int(urlparse.parse_qs(parsed.query)['page'][0])
+
+ if last_page == 1:
+ return len(info.json())
+ try:
+ last_page_info = self.py3.request(last_url, timeout=10,
+ auth=(self.username, self.auth_token))
+ except self.py3.RequestException:
+ return
+
+ return len(info.json()) * (last_page - 1) + len(last_page_info.json())
+
if info.status_code == 404:
if not self.repo_warning:
self.py3.notify_user('Github repo cannot be found.')
|
{"golden_diff": "diff --git a/py3status/modules/github.py b/py3status/modules/github.py\n--- a/py3status/modules/github.py\n+++ b/py3status/modules/github.py\n@@ -152,7 +152,32 @@\n except (self.py3.RequestException):\n return\n if info.status_code == 200:\n- return len(info.json())\n+ links = info._response.headers.get('Link', '').split(', ')\n+ if not links:\n+ return len(info.json())\n+\n+ last_page = 1\n+ for link in links:\n+ if 'rel=\"last\"' in link:\n+ import sys\n+ if sys.version_info[0] == 2:\n+ import urlparse\n+ else:\n+ import urllib.parse as urlparse\n+ last_url = link[link.find('<') + 1:link.find('>')]\n+ parsed = urlparse.urlparse(last_url)\n+ last_page = int(urlparse.parse_qs(parsed.query)['page'][0])\n+\n+ if last_page == 1:\n+ return len(info.json())\n+ try:\n+ last_page_info = self.py3.request(last_url, timeout=10,\n+ auth=(self.username, self.auth_token))\n+ except self.py3.RequestException:\n+ return\n+\n+ return len(info.json()) * (last_page - 1) + len(last_page_info.json())\n+\n if info.status_code == 404:\n if not self.repo_warning:\n self.py3.notify_user('Github repo cannot be found.')\n", "issue": "GitHub: Show correct count of unread notifications\nIf you have more than 50 unread notifications in total, and you want to see that total notification count, the GitHub module still only shows 50 due to pagination:\r\n\r\nhttps://github.com/ultrabug/py3status/blob/master/py3status/modules/github.py#L155\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay Github notifications and issue/pull requests for a repo.\n\nTo check notifications a Github `username` and `personal access token` are\nrequired. You can create a personal access token at\nhttps://github.com/settings/tokens The only `scope` needed is `notifications`,\nwhich provides readonly access to notifications.\n\nThe Github API is rate limited so setting `cache_timeout` too small may cause\nissues see https://developer.github.com/v3/#rate-limiting for details\n\nConfiguration parameters:\n auth_token: Github personal access token, needed to check notifications\n see above.\n (default None)\n button_action: Button that when clicked opens the Github notification page\n if notifications, else the project page for the repository if there is\n one (otherwise the github home page). Setting to `None` disables.\n (default 3)\n button_refresh: Button that when clicked refreshes module.\n Setting to `None` disables.\n (default 2)\n cache_timeout: How often we refresh this module in seconds\n (default 60)\n format: Format of output\n *(default '{repo} {issues}/{pull_requests}{notifications}'\n if username and auth_token provided else\n '{repo} {issues}/{pull_requests}')*\n format_notifications: Format of `{notification}` status placeholder.\n (default ' N{notifications_count}')\n notifications: Type of notifications can be `all` for all notifications or\n `repo` to only get notifications for the repo specified. If repo is\n not provided then all notifications will be checked.\n (default 'all')\n repo: Github repo to check\n (default 'ultrabug/py3status')\n username: Github username, needed to check notifications.\n (default None)\n\nFormat placeholders:\n {issues} Number of open issues.\n {notifications} Notifications. If no notifications this will be empty.\n {notifications_count} Number of notifications. This is also the __Only__\n placeholder available to `format_notifications`.\n {pull_requests} Number of open pull requests\n {repo} short name of the repository being checked. 
eg py3status\n {repo_full} full name of the repository being checked. eg ultrabug/py3status\n\nExamples:\n```\n# set github access credentials\ngithub {\n auth_token = '40_char_hex_access_token'\n username = 'my_username'\n}\n\n# just check for any notifications\ngithub {\n auth_token = '40_char_hex_access_token'\n username = 'my_username'\n format = 'Github {notifications_count}'\n}\n```\n\n@author tobes\n\nSAMPLE OUTPUT\n{'full_text': 'py3status 34/24'}\n\nnotification\n{'full_text': 'py3status 34/24 N3', 'urgent': True}\n\"\"\"\n\nGITHUB_API_URL = 'https://api.github.com'\nGITHUB_URL = 'https://github.com/'\n\n\nclass Py3status:\n auth_token = None\n button_action = 3\n button_refresh = 2\n cache_timeout = 60\n format = None\n format_notifications = ' N{notifications_count}'\n notifications = 'all'\n repo = 'ultrabug/py3status'\n username = None\n\n def post_config_hook(self):\n self.first = True\n self.notification_warning = False\n self.repo_warning = False\n self._issues = '?'\n self._pulls = '?'\n self._notify = '?'\n\n def _init(self):\n # Set format if user has not configured it.\n if not self.format:\n if self.username and self.auth_token:\n # include notifications\n self.format = '{repo} {issues}/{pull_requests}{notifications}'\n else:\n self.format = '{repo} {issues}/{pull_requests}'\n\n def _github_count(self, url):\n \"\"\"\n Get counts for requests that return 'total_count' in the json response.\n \"\"\"\n if self.first:\n return '?'\n url = GITHUB_API_URL + url + '&per_page=1'\n # if we have authentication details use them as we get better\n # rate-limiting.\n if self.username and self.auth_token:\n auth = (self.username, self.auth_token)\n else:\n auth = None\n try:\n info = self.py3.request(url, timeout=10, auth=auth)\n except (self.py3.RequestException):\n return\n if info and info.status_code == 200:\n return(int(info.json()['total_count']))\n if info.status_code == 422:\n if not self.repo_warning:\n self.py3.notify_user('Github repo cannot be found.')\n self.repo_warning = True\n return '?'\n\n def _notifications(self):\n \"\"\"\n Get the number of unread notifications.\n \"\"\"\n if not self.username or not self.auth_token:\n if not self.notification_warning:\n self.py3.notify_user('Github module needs username and '\n 'auth_token to check notifications.')\n self.notification_warning = True\n return '?'\n if self.first:\n return '?'\n if self.notifications == 'all' or not self.repo:\n url = GITHUB_API_URL + '/notifications'\n else:\n url = GITHUB_API_URL + '/repos/' + self.repo + '/notifications'\n url += '?per_page=100'\n try:\n info = self.py3.request(url, timeout=10,\n auth=(self.username, self.auth_token))\n except (self.py3.RequestException):\n return\n if info.status_code == 200:\n return len(info.json())\n if info.status_code == 404:\n if not self.repo_warning:\n self.py3.notify_user('Github repo cannot be found.')\n self.repo_warning = True\n\n def github(self):\n if self.first:\n self._init()\n status = {}\n urgent = False\n # issues\n if self.repo and self.py3.format_contains(self.format, 'issues'):\n url = '/search/issues?q=state:open+type:issue+repo:' + self.repo\n self._issues = self._github_count(url) or self._issues\n status['issues'] = self._issues\n # pull requests\n if self.repo and self.py3.format_contains(self.format, 'pull_requests'):\n url = '/search/issues?q=state:open+type:pr+repo:' + self.repo\n self._pulls = self._github_count(url) or self._pulls\n status['pull_requests'] = self._pulls\n # notifications\n if 
(self.py3.format_contains(self.format, 'notifications') or\n self.py3.format_contains(self.format, 'notifications_count')):\n count = self._notifications()\n # if we don't have a notification count, then use the last value\n # that we did have.\n if count is None:\n count = self._notify\n self._notify = count\n if count and count != '?':\n notify = self.py3.safe_format(\n self.format_notifications,\n {'notifications_count': count})\n urgent = True\n else:\n notify = ''\n status['notifications'] = notify\n status['notifications_count'] = count\n # repo\n try:\n status['repo'] = self.repo.split('/')[1]\n except IndexError:\n status['repo'] = 'Error'\n status['repo_full'] = self.repo\n\n if self.first:\n cached_until = 0\n self.first = False\n else:\n cached_until = self.py3.time_in(self.cache_timeout)\n\n return {\n 'full_text': self.py3.safe_format(self.format, status),\n 'cached_until': cached_until,\n 'urgent': urgent\n }\n\n def on_click(self, event):\n button = event['button']\n if button == self.button_action:\n # open github in browser\n if self._notify and self._notify != '?':\n # open github notifications page\n url = GITHUB_URL + 'notifications'\n else:\n if self.notifications == 'all' and not self.repo:\n # open github.com if there are no unread notifications and no repo\n url = GITHUB_URL\n else:\n # open repo page if there are no unread notifications\n url = GITHUB_URL + self.repo\n # open url in default browser\n self.py3.command_run('xdg-open {}'.format(url))\n self.py3.prevent_refresh()\n elif button != self.button_refresh:\n # only refresh the module if needed\n self.py3.prevent_refresh()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/github.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay Github notifications and issue/pull requests for a repo.\n\nTo check notifications a Github `username` and `personal access token` are\nrequired. You can create a personal access token at\nhttps://github.com/settings/tokens The only `scope` needed is `notifications`,\nwhich provides readonly access to notifications.\n\nThe Github API is rate limited so setting `cache_timeout` too small may cause\nissues see https://developer.github.com/v3/#rate-limiting for details\n\nConfiguration parameters:\n auth_token: Github personal access token, needed to check notifications\n see above.\n (default None)\n button_action: Button that when clicked opens the Github notification page\n if notifications, else the project page for the repository if there is\n one (otherwise the github home page). Setting to `None` disables.\n (default 3)\n button_refresh: Button that when clicked refreshes module.\n Setting to `None` disables.\n (default 2)\n cache_timeout: How often we refresh this module in seconds\n (default 60)\n format: Format of output\n *(default '{repo} {issues}/{pull_requests}{notifications}'\n if username and auth_token provided else\n '{repo} {issues}/{pull_requests}')*\n format_notifications: Format of `{notification}` status placeholder.\n (default ' N{notifications_count}')\n notifications: Type of notifications can be `all` for all notifications or\n `repo` to only get notifications for the repo specified. 
If repo is\n not provided then all notifications will be checked.\n (default 'all')\n repo: Github repo to check\n (default 'ultrabug/py3status')\n username: Github username, needed to check notifications.\n (default None)\n\nFormat placeholders:\n {issues} Number of open issues.\n {notifications} Notifications. If no notifications this will be empty.\n {notifications_count} Number of notifications. This is also the __Only__\n placeholder available to `format_notifications`.\n {pull_requests} Number of open pull requests\n {repo} short name of the repository being checked. eg py3status\n {repo_full} full name of the repository being checked. eg ultrabug/py3status\n\nExamples:\n```\n# set github access credentials\ngithub {\n auth_token = '40_char_hex_access_token'\n username = 'my_username'\n}\n\n# just check for any notifications\ngithub {\n auth_token = '40_char_hex_access_token'\n username = 'my_username'\n format = 'Github {notifications_count}'\n}\n```\n\n@author tobes\n\nSAMPLE OUTPUT\n{'full_text': 'py3status 34/24'}\n\nnotification\n{'full_text': 'py3status 34/24 N3', 'urgent': True}\n\"\"\"\n\nGITHUB_API_URL = 'https://api.github.com'\nGITHUB_URL = 'https://github.com/'\n\n\nclass Py3status:\n auth_token = None\n button_action = 3\n button_refresh = 2\n cache_timeout = 60\n format = None\n format_notifications = ' N{notifications_count}'\n notifications = 'all'\n repo = 'ultrabug/py3status'\n username = None\n\n def post_config_hook(self):\n self.first = True\n self.notification_warning = False\n self.repo_warning = False\n self._issues = '?'\n self._pulls = '?'\n self._notify = '?'\n\n def _init(self):\n # Set format if user has not configured it.\n if not self.format:\n if self.username and self.auth_token:\n # include notifications\n self.format = '{repo} {issues}/{pull_requests}{notifications}'\n else:\n self.format = '{repo} {issues}/{pull_requests}'\n\n def _github_count(self, url):\n \"\"\"\n Get counts for requests that return 'total_count' in the json response.\n \"\"\"\n if self.first:\n return '?'\n url = GITHUB_API_URL + url + '&per_page=1'\n # if we have authentication details use them as we get better\n # rate-limiting.\n if self.username and self.auth_token:\n auth = (self.username, self.auth_token)\n else:\n auth = None\n try:\n info = self.py3.request(url, timeout=10, auth=auth)\n except (self.py3.RequestException):\n return\n if info and info.status_code == 200:\n return(int(info.json()['total_count']))\n if info.status_code == 422:\n if not self.repo_warning:\n self.py3.notify_user('Github repo cannot be found.')\n self.repo_warning = True\n return '?'\n\n def _notifications(self):\n \"\"\"\n Get the number of unread notifications.\n \"\"\"\n if not self.username or not self.auth_token:\n if not self.notification_warning:\n self.py3.notify_user('Github module needs username and '\n 'auth_token to check notifications.')\n self.notification_warning = True\n return '?'\n if self.first:\n return '?'\n if self.notifications == 'all' or not self.repo:\n url = GITHUB_API_URL + '/notifications'\n else:\n url = GITHUB_API_URL + '/repos/' + self.repo + '/notifications'\n url += '?per_page=100'\n try:\n info = self.py3.request(url, timeout=10,\n auth=(self.username, self.auth_token))\n except (self.py3.RequestException):\n return\n if info.status_code == 200:\n links = info._response.headers.get('Link', '').split(', ')\n if not links:\n return len(info.json())\n\n last_page = 1\n for link in links:\n if 'rel=\"last\"' in link:\n import sys\n if sys.version_info[0] == 2:\n 
import urlparse\n else:\n import urllib.parse as urlparse\n last_url = link[link.find('<') + 1:link.find('>')]\n parsed = urlparse.urlparse(last_url)\n last_page = int(urlparse.parse_qs(parsed.query)['page'][0])\n\n if last_page == 1:\n return len(info.json())\n try:\n last_page_info = self.py3.request(last_url, timeout=10,\n auth=(self.username, self.auth_token))\n except self.py3.RequestException:\n return\n\n return len(info.json()) * (last_page - 1) + len(last_page_info.json())\n\n if info.status_code == 404:\n if not self.repo_warning:\n self.py3.notify_user('Github repo cannot be found.')\n self.repo_warning = True\n\n def github(self):\n if self.first:\n self._init()\n status = {}\n urgent = False\n # issues\n if self.repo and self.py3.format_contains(self.format, 'issues'):\n url = '/search/issues?q=state:open+type:issue+repo:' + self.repo\n self._issues = self._github_count(url) or self._issues\n status['issues'] = self._issues\n # pull requests\n if self.repo and self.py3.format_contains(self.format, 'pull_requests'):\n url = '/search/issues?q=state:open+type:pr+repo:' + self.repo\n self._pulls = self._github_count(url) or self._pulls\n status['pull_requests'] = self._pulls\n # notifications\n if (self.py3.format_contains(self.format, 'notifications') or\n self.py3.format_contains(self.format, 'notifications_count')):\n count = self._notifications()\n # if we don't have a notification count, then use the last value\n # that we did have.\n if count is None:\n count = self._notify\n self._notify = count\n if count and count != '?':\n notify = self.py3.safe_format(\n self.format_notifications,\n {'notifications_count': count})\n urgent = True\n else:\n notify = ''\n status['notifications'] = notify\n status['notifications_count'] = count\n # repo\n try:\n status['repo'] = self.repo.split('/')[1]\n except IndexError:\n status['repo'] = 'Error'\n status['repo_full'] = self.repo\n\n if self.first:\n cached_until = 0\n self.first = False\n else:\n cached_until = self.py3.time_in(self.cache_timeout)\n\n return {\n 'full_text': self.py3.safe_format(self.format, status),\n 'cached_until': cached_until,\n 'urgent': urgent\n }\n\n def on_click(self, event):\n button = event['button']\n if button == self.button_action:\n # open github in browser\n if self._notify and self._notify != '?':\n # open github notifications page\n url = GITHUB_URL + 'notifications'\n else:\n if self.notifications == 'all' and not self.repo:\n # open github.com if there are no unread notifications and no repo\n url = GITHUB_URL\n else:\n # open repo page if there are no unread notifications\n url = GITHUB_URL + self.repo\n # open url in default browser\n self.py3.command_run('xdg-open {}'.format(url))\n self.py3.prevent_refresh()\n elif button != self.button_refresh:\n # only refresh the module if needed\n self.py3.prevent_refresh()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/github.py"}]}
| 2,830 | 336 |
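The golden diff in the record above fixes the count by walking GitHub's `Link` pagination header. As a hedged, standalone sketch of the same counting strategy (not the module's code; the endpoint, `per_page` value and credentials below are illustrative, and it assumes the first page comes back full, exactly as the patch does), the snippet uses `requests`, which parses the `Link` header into `response.links`:

```python
# Total = (items per full page) * (last_page - 1) + items on the last page.
from urllib.parse import parse_qs, urlparse

import requests


def count_notifications(username, token, per_page=50):
    url = "https://api.github.com/notifications"
    first = requests.get(url, params={"per_page": per_page},
                         auth=(username, token), timeout=10)
    first.raise_for_status()
    last_link = first.links.get("last")  # requests parses the Link header for us
    if last_link is None:                # no rel="last": a single page holds everything
        return len(first.json())
    last_page = int(parse_qs(urlparse(last_link["url"]).query)["page"][0])
    last = requests.get(last_link["url"], auth=(username, token), timeout=10)
    last.raise_for_status()
    return len(first.json()) * (last_page - 1) + len(last.json())
```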
gh_patches_debug_17382
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-5440
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`new-password` route is defined twice
It looks to me like this route in the admin UI is defined redundantly:
https://github.com/freedomofpress/securedrop/blob/develop/securedrop/journalist_app/admin.py#L219
https://github.com/freedomofpress/securedrop/blob/develop/securedrop/journalist_app/admin.py#L259
If there is no reason for this, we should remove one of the redundant definitions.
--- END ISSUE ---
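For context on why the duplication goes unnoticed, here is a minimal, self-contained sketch (a hypothetical Flask app, not securedrop code): Flask only rejects a second registration when the endpoint name collides, so two differently named view functions can share one URL rule without any error, and Werkzeug simply dispatches to whichever matching rule it finds first.

```python
# Duplicate URL rules with distinct endpoint names register silently; only a
# duplicate *endpoint* (e.g. the same function name) raises an error in Flask.
from flask import Flask

app = Flask(__name__)


@app.route('/edit/<int:user_id>/new-password', methods=('POST',))
def set_password(user_id):
    return 'set_password'


@app.route('/edit/<int:user_id>/new-password', methods=('POST',))
def new_password(user_id):
    return 'new_password'


if __name__ == '__main__':
    # Both rules appear in the URL map, but a POST is handled by just one of them.
    for rule in app.url_map.iter_rules():
        print(rule.endpoint, rule)
```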
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist_app/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4
5 from flask import (Blueprint, render_template, request, url_for, redirect, g,
6 current_app, flash, abort)
7 from flask_babel import gettext
8 from sqlalchemy.exc import IntegrityError
9 from sqlalchemy.orm.exc import NoResultFound
10
11 from db import db
12 from models import (InstanceConfig, Journalist, InvalidUsernameException,
13 FirstOrLastNameError, PasswordError)
14 from journalist_app.decorators import admin_required
15 from journalist_app.utils import (make_password, commit_account_changes, set_diceware_password,
16 validate_hotp_secret, revoke_token)
17 from journalist_app.forms import LogoForm, NewUserForm, SubmissionPreferencesForm
18
19
20 def make_blueprint(config):
21 view = Blueprint('admin', __name__)
22
23 @view.route('/', methods=('GET', 'POST'))
24 @admin_required
25 def index():
26 users = Journalist.query.all()
27 return render_template("admin.html", users=users)
28
29 @view.route('/config', methods=('GET', 'POST'))
30 @admin_required
31 def manage_config():
32 # The UI prompt ("prevent") is the opposite of the setting ("allow"):
33 submission_preferences_form = SubmissionPreferencesForm(
34 prevent_document_uploads=not current_app.instance_config.allow_document_uploads)
35 logo_form = LogoForm()
36 if logo_form.validate_on_submit():
37 f = logo_form.logo.data
38 custom_logo_filepath = os.path.join(current_app.static_folder, 'i',
39 'custom_logo.png')
40 try:
41 f.save(custom_logo_filepath)
42 flash(gettext("Image updated."), "logo-success")
43 except Exception:
44 flash("Unable to process the image file."
45 " Try another one.", "logo-error")
46 finally:
47 return redirect(url_for("admin.manage_config"))
48 else:
49 for field, errors in list(logo_form.errors.items()):
50 for error in errors:
51 flash(error, "logo-error")
52 return render_template("config.html",
53 submission_preferences_form=submission_preferences_form,
54 logo_form=logo_form)
55
56 @view.route('/update-submission-preferences', methods=['POST'])
57 @admin_required
58 def update_submission_preferences():
59 form = SubmissionPreferencesForm()
60 if form.validate_on_submit():
61 # The UI prompt ("prevent") is the opposite of the setting ("allow"):
62 flash(gettext("Preferences saved."), "submission-preferences-success")
63 value = not bool(request.form.get('prevent_document_uploads'))
64 InstanceConfig.set('allow_document_uploads', value)
65 return redirect(url_for('admin.manage_config'))
66
67 @view.route('/add', methods=('GET', 'POST'))
68 @admin_required
69 def add_user():
70 form = NewUserForm()
71 if form.validate_on_submit():
72 form_valid = True
73 username = request.form['username']
74 first_name = request.form['first_name']
75 last_name = request.form['last_name']
76 password = request.form['password']
77 is_admin = bool(request.form.get('is_admin'))
78
79 try:
80 otp_secret = None
81 if request.form.get('is_hotp', False):
82 otp_secret = request.form.get('otp_secret', '')
83 new_user = Journalist(username=username,
84 password=password,
85 first_name=first_name,
86 last_name=last_name,
87 is_admin=is_admin,
88 otp_secret=otp_secret)
89 db.session.add(new_user)
90 db.session.commit()
91 except PasswordError:
92 flash(gettext(
93 'There was an error with the autogenerated password. '
94 'User not created. Please try again.'), 'error')
95 form_valid = False
96 except InvalidUsernameException as e:
97 form_valid = False
98 flash('Invalid username: ' + str(e), "error")
99 except IntegrityError as e:
100 db.session.rollback()
101 form_valid = False
102 if "UNIQUE constraint failed: journalists.username" in str(e):
103 flash(gettext('Username "{user}" already taken.'.format(
104 user=username)), "error")
105 else:
106 flash(gettext("An error occurred saving this user"
107 " to the database."
108 " Please inform your admin."),
109 "error")
110 current_app.logger.error("Adding user "
111 "'{}' failed: {}".format(
112 username, e))
113
114 if form_valid:
115 return redirect(url_for('admin.new_user_two_factor',
116 uid=new_user.id))
117
118 return render_template("admin_add_user.html",
119 password=make_password(config),
120 form=form)
121
122 @view.route('/2fa', methods=('GET', 'POST'))
123 @admin_required
124 def new_user_two_factor():
125 user = Journalist.query.get(request.args['uid'])
126
127 if request.method == 'POST':
128 token = request.form['token']
129 if user.verify_token(token):
130 flash(gettext(
131 "The two-factor code for user \"{user}\" was verified "
132 "successfully.").format(user=user.username),
133 "notification")
134 return redirect(url_for("admin.index"))
135 else:
136 flash(gettext(
137 "There was a problem verifying the two-factor code. Please try again."),
138 "error")
139
140 return render_template("admin_new_user_two_factor.html", user=user)
141
142 @view.route('/reset-2fa-totp', methods=['POST'])
143 @admin_required
144 def reset_two_factor_totp():
145 uid = request.form['uid']
146 user = Journalist.query.get(uid)
147 user.is_totp = True
148 user.regenerate_totp_shared_secret()
149 db.session.commit()
150 return redirect(url_for('admin.new_user_two_factor', uid=uid))
151
152 @view.route('/reset-2fa-hotp', methods=['POST'])
153 @admin_required
154 def reset_two_factor_hotp():
155 uid = request.form['uid']
156 otp_secret = request.form.get('otp_secret', None)
157 if otp_secret:
158 user = Journalist.query.get(uid)
159 if not validate_hotp_secret(user, otp_secret):
160 return render_template('admin_edit_hotp_secret.html', uid=uid)
161 db.session.commit()
162 return redirect(url_for('admin.new_user_two_factor', uid=uid))
163 else:
164 return render_template('admin_edit_hotp_secret.html', uid=uid)
165
166 @view.route('/edit/<int:user_id>', methods=('GET', 'POST'))
167 @admin_required
168 def edit_user(user_id):
169 user = Journalist.query.get(user_id)
170
171 if request.method == 'POST':
172 if request.form.get('username', None):
173 new_username = request.form['username']
174
175 try:
176 Journalist.check_username_acceptable(new_username)
177 except InvalidUsernameException as e:
178 flash('Invalid username: ' + str(e), 'error')
179 return redirect(url_for("admin.edit_user",
180 user_id=user_id))
181
182 if new_username == user.username:
183 pass
184 elif Journalist.query.filter_by(
185 username=new_username).one_or_none():
186 flash(gettext(
187 'Username "{user}" already taken.').format(
188 user=new_username),
189 "error")
190 return redirect(url_for("admin.edit_user",
191 user_id=user_id))
192 else:
193 user.username = new_username
194
195 try:
196 first_name = request.form['first_name']
197 Journalist.check_name_acceptable(first_name)
198 user.first_name = first_name
199 except FirstOrLastNameError as e:
200 flash(gettext('Name not updated: {}'.format(e)), "error")
201 return redirect(url_for("admin.edit_user", user_id=user_id))
202
203 try:
204 last_name = request.form['last_name']
205 Journalist.check_name_acceptable(last_name)
206 user.last_name = last_name
207 except FirstOrLastNameError as e:
208 flash(gettext('Name not updated: {}'.format(e)), "error")
209 return redirect(url_for("admin.edit_user", user_id=user_id))
210
211 user.is_admin = bool(request.form.get('is_admin'))
212
213 commit_account_changes(user)
214
215 password = make_password(config)
216 return render_template("edit_account.html", user=user,
217 password=password)
218
219 @view.route('/edit/<int:user_id>/new-password', methods=('POST',))
220 @admin_required
221 def set_password(user_id):
222 try:
223 user = Journalist.query.get(user_id)
224 except NoResultFound:
225 abort(404)
226
227 password = request.form.get('password')
228 if set_diceware_password(user, password) is not False:
229 if user.last_token is not None:
230 revoke_token(user, user.last_token)
231 user.session_nonce += 1
232 db.session.commit()
233
234 return redirect(url_for('admin.edit_user', user_id=user_id))
235
236 @view.route('/delete/<int:user_id>', methods=('POST',))
237 @admin_required
238 def delete_user(user_id):
239 user = Journalist.query.get(user_id)
240 if user_id == g.user.id:
241 # Do not flash because the interface already has safe guards.
242 # It can only happen by manually crafting a POST request
243 current_app.logger.error(
244 "Admin {} tried to delete itself".format(g.user.username))
245 abort(403)
246 elif user:
247 db.session.delete(user)
248 db.session.commit()
249 flash(gettext("Deleted user '{user}'.").format(
250 user=user.username), "notification")
251 else:
252 current_app.logger.error(
253 "Admin {} tried to delete nonexistent user with pk={}".format(
254 g.user.username, user_id))
255 abort(404)
256
257 return redirect(url_for('admin.index'))
258
259 @view.route('/edit/<int:user_id>/new-password', methods=('POST',))
260 @admin_required
261 def new_password(user_id):
262 try:
263 user = Journalist.query.get(user_id)
264 except NoResultFound:
265 abort(404)
266
267 password = request.form.get('password')
268 if set_diceware_password(user, password) is not False:
269 if user.last_token is not None:
270 revoke_token(user, user.last_token)
271 user.session_nonce += 1
272 db.session.commit()
273 return redirect(url_for('admin.edit_user', user_id=user_id))
274
275 @view.route('/ossec-test')
276 @admin_required
277 def ossec_test():
278 current_app.logger.error('This is a test OSSEC alert')
279 flash(gettext('Test alert sent. Please check your email.'),
280 'notification')
281 return redirect(url_for('admin.manage_config'))
282
283 return view
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/journalist_app/admin.py b/securedrop/journalist_app/admin.py
--- a/securedrop/journalist_app/admin.py
+++ b/securedrop/journalist_app/admin.py
@@ -216,23 +216,6 @@
return render_template("edit_account.html", user=user,
password=password)
- @view.route('/edit/<int:user_id>/new-password', methods=('POST',))
- @admin_required
- def set_password(user_id):
- try:
- user = Journalist.query.get(user_id)
- except NoResultFound:
- abort(404)
-
- password = request.form.get('password')
- if set_diceware_password(user, password) is not False:
- if user.last_token is not None:
- revoke_token(user, user.last_token)
- user.session_nonce += 1
- db.session.commit()
-
- return redirect(url_for('admin.edit_user', user_id=user_id))
-
@view.route('/delete/<int:user_id>', methods=('POST',))
@admin_required
def delete_user(user_id):
|
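As a sanity check on the cleanup above, the sketch below is hypothetical (not part of securedrop's test suite) and assumes the securedrop package and its Flask/SQLAlchemy dependencies are importable; it registers the admin blueprint on a bare Flask app and asserts that exactly one rule ends in `new-password`; before the patch that count is two.

```python
# Hypothetical smoke check: after the patch the admin blueprint should expose
# the new-password rule exactly once.
from flask import Flask

from journalist_app.admin import make_blueprint  # module shown in the listing above

app = Flask(__name__)
app.register_blueprint(make_blueprint(config=None), url_prefix='/admin')

new_password_rules = [str(rule) for rule in app.url_map.iter_rules()
                      if str(rule).endswith('/new-password')]
assert len(new_password_rules) == 1, new_password_rules
```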
{"golden_diff": "diff --git a/securedrop/journalist_app/admin.py b/securedrop/journalist_app/admin.py\n--- a/securedrop/journalist_app/admin.py\n+++ b/securedrop/journalist_app/admin.py\n@@ -216,23 +216,6 @@\n return render_template(\"edit_account.html\", user=user,\n password=password)\n \n- @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n- @admin_required\n- def set_password(user_id):\n- try:\n- user = Journalist.query.get(user_id)\n- except NoResultFound:\n- abort(404)\n-\n- password = request.form.get('password')\n- if set_diceware_password(user, password) is not False:\n- if user.last_token is not None:\n- revoke_token(user, user.last_token)\n- user.session_nonce += 1\n- db.session.commit()\n-\n- return redirect(url_for('admin.edit_user', user_id=user_id))\n-\n @view.route('/delete/<int:user_id>', methods=('POST',))\n @admin_required\n def delete_user(user_id):\n", "issue": "`new-password` route is defined twice\nIt looks to me like this route in the admin UI is defined redundantly:\r\nhttps://github.com/freedomofpress/securedrop/blob/develop/securedrop/journalist_app/admin.py#L219\r\nhttps://github.com/freedomofpress/securedrop/blob/develop/securedrop/journalist_app/admin.py#L259\r\n\r\nIf there is no reason for this, we should remove one of the redundant definitions.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nfrom flask import (Blueprint, render_template, request, url_for, redirect, g,\n current_app, flash, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import (InstanceConfig, Journalist, InvalidUsernameException,\n FirstOrLastNameError, PasswordError)\nfrom journalist_app.decorators import admin_required\nfrom journalist_app.utils import (make_password, commit_account_changes, set_diceware_password,\n validate_hotp_secret, revoke_token)\nfrom journalist_app.forms import LogoForm, NewUserForm, SubmissionPreferencesForm\n\n\ndef make_blueprint(config):\n view = Blueprint('admin', __name__)\n\n @view.route('/', methods=('GET', 'POST'))\n @admin_required\n def index():\n users = Journalist.query.all()\n return render_template(\"admin.html\", users=users)\n\n @view.route('/config', methods=('GET', 'POST'))\n @admin_required\n def manage_config():\n # The UI prompt (\"prevent\") is the opposite of the setting (\"allow\"):\n submission_preferences_form = SubmissionPreferencesForm(\n prevent_document_uploads=not current_app.instance_config.allow_document_uploads)\n logo_form = LogoForm()\n if logo_form.validate_on_submit():\n f = logo_form.logo.data\n custom_logo_filepath = os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')\n try:\n f.save(custom_logo_filepath)\n flash(gettext(\"Image updated.\"), \"logo-success\")\n except Exception:\n flash(\"Unable to process the image file.\"\n \" Try another one.\", \"logo-error\")\n finally:\n return redirect(url_for(\"admin.manage_config\"))\n else:\n for field, errors in list(logo_form.errors.items()):\n for error in errors:\n flash(error, \"logo-error\")\n return render_template(\"config.html\",\n submission_preferences_form=submission_preferences_form,\n logo_form=logo_form)\n\n @view.route('/update-submission-preferences', methods=['POST'])\n @admin_required\n def update_submission_preferences():\n form = SubmissionPreferencesForm()\n if form.validate_on_submit():\n # The UI prompt (\"prevent\") is the opposite of the setting (\"allow\"):\n 
flash(gettext(\"Preferences saved.\"), \"submission-preferences-success\")\n value = not bool(request.form.get('prevent_document_uploads'))\n InstanceConfig.set('allow_document_uploads', value)\n return redirect(url_for('admin.manage_config'))\n\n @view.route('/add', methods=('GET', 'POST'))\n @admin_required\n def add_user():\n form = NewUserForm()\n if form.validate_on_submit():\n form_valid = True\n username = request.form['username']\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n password = request.form['password']\n is_admin = bool(request.form.get('is_admin'))\n\n try:\n otp_secret = None\n if request.form.get('is_hotp', False):\n otp_secret = request.form.get('otp_secret', '')\n new_user = Journalist(username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n is_admin=is_admin,\n otp_secret=otp_secret)\n db.session.add(new_user)\n db.session.commit()\n except PasswordError:\n flash(gettext(\n 'There was an error with the autogenerated password. '\n 'User not created. Please try again.'), 'error')\n form_valid = False\n except InvalidUsernameException as e:\n form_valid = False\n flash('Invalid username: ' + str(e), \"error\")\n except IntegrityError as e:\n db.session.rollback()\n form_valid = False\n if \"UNIQUE constraint failed: journalists.username\" in str(e):\n flash(gettext('Username \"{user}\" already taken.'.format(\n user=username)), \"error\")\n else:\n flash(gettext(\"An error occurred saving this user\"\n \" to the database.\"\n \" Please inform your admin.\"),\n \"error\")\n current_app.logger.error(\"Adding user \"\n \"'{}' failed: {}\".format(\n username, e))\n\n if form_valid:\n return redirect(url_for('admin.new_user_two_factor',\n uid=new_user.id))\n\n return render_template(\"admin_add_user.html\",\n password=make_password(config),\n form=form)\n\n @view.route('/2fa', methods=('GET', 'POST'))\n @admin_required\n def new_user_two_factor():\n user = Journalist.query.get(request.args['uid'])\n\n if request.method == 'POST':\n token = request.form['token']\n if user.verify_token(token):\n flash(gettext(\n \"The two-factor code for user \\\"{user}\\\" was verified \"\n \"successfully.\").format(user=user.username),\n \"notification\")\n return redirect(url_for(\"admin.index\"))\n else:\n flash(gettext(\n \"There was a problem verifying the two-factor code. 
Please try again.\"),\n \"error\")\n\n return render_template(\"admin_new_user_two_factor.html\", user=user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n @admin_required\n def reset_two_factor_totp():\n uid = request.form['uid']\n user = Journalist.query.get(uid)\n user.is_totp = True\n user.regenerate_totp_shared_secret()\n db.session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n @admin_required\n def reset_two_factor_hotp():\n uid = request.form['uid']\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n user = Journalist.query.get(uid)\n if not validate_hotp_secret(user, otp_secret):\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n db.session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n else:\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n\n @view.route('/edit/<int:user_id>', methods=('GET', 'POST'))\n @admin_required\n def edit_user(user_id):\n user = Journalist.query.get(user_id)\n\n if request.method == 'POST':\n if request.form.get('username', None):\n new_username = request.form['username']\n\n try:\n Journalist.check_username_acceptable(new_username)\n except InvalidUsernameException as e:\n flash('Invalid username: ' + str(e), 'error')\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n\n if new_username == user.username:\n pass\n elif Journalist.query.filter_by(\n username=new_username).one_or_none():\n flash(gettext(\n 'Username \"{user}\" already taken.').format(\n user=new_username),\n \"error\")\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n else:\n user.username = new_username\n\n try:\n first_name = request.form['first_name']\n Journalist.check_name_acceptable(first_name)\n user.first_name = first_name\n except FirstOrLastNameError as e:\n flash(gettext('Name not updated: {}'.format(e)), \"error\")\n return redirect(url_for(\"admin.edit_user\", user_id=user_id))\n\n try:\n last_name = request.form['last_name']\n Journalist.check_name_acceptable(last_name)\n user.last_name = last_name\n except FirstOrLastNameError as e:\n flash(gettext('Name not updated: {}'.format(e)), \"error\")\n return redirect(url_for(\"admin.edit_user\", user_id=user_id))\n\n user.is_admin = bool(request.form.get('is_admin'))\n\n commit_account_changes(user)\n\n password = make_password(config)\n return render_template(\"edit_account.html\", user=user,\n password=password)\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def set_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n if set_diceware_password(user, password) is not False:\n if user.last_token is not None:\n revoke_token(user, user.last_token)\n user.session_nonce += 1\n db.session.commit()\n\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/delete/<int:user_id>', methods=('POST',))\n @admin_required\n def delete_user(user_id):\n user = Journalist.query.get(user_id)\n if user_id == g.user.id:\n # Do not flash because the interface already has safe guards.\n # It can only happen by manually crafting a POST request\n current_app.logger.error(\n \"Admin {} tried to delete itself\".format(g.user.username))\n abort(403)\n elif user:\n db.session.delete(user)\n db.session.commit()\n flash(gettext(\"Deleted user '{user}'.\").format(\n user=user.username), 
\"notification\")\n else:\n current_app.logger.error(\n \"Admin {} tried to delete nonexistent user with pk={}\".format(\n g.user.username, user_id))\n abort(404)\n\n return redirect(url_for('admin.index'))\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def new_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n if set_diceware_password(user, password) is not False:\n if user.last_token is not None:\n revoke_token(user, user.last_token)\n user.session_nonce += 1\n db.session.commit()\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/ossec-test')\n @admin_required\n def ossec_test():\n current_app.logger.error('This is a test OSSEC alert')\n flash(gettext('Test alert sent. Please check your email.'),\n 'notification')\n return redirect(url_for('admin.manage_config'))\n\n return view\n", "path": "securedrop/journalist_app/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nfrom flask import (Blueprint, render_template, request, url_for, redirect, g,\n current_app, flash, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import (InstanceConfig, Journalist, InvalidUsernameException,\n FirstOrLastNameError, PasswordError)\nfrom journalist_app.decorators import admin_required\nfrom journalist_app.utils import (make_password, commit_account_changes, set_diceware_password,\n validate_hotp_secret, revoke_token)\nfrom journalist_app.forms import LogoForm, NewUserForm, SubmissionPreferencesForm\n\n\ndef make_blueprint(config):\n view = Blueprint('admin', __name__)\n\n @view.route('/', methods=('GET', 'POST'))\n @admin_required\n def index():\n users = Journalist.query.all()\n return render_template(\"admin.html\", users=users)\n\n @view.route('/config', methods=('GET', 'POST'))\n @admin_required\n def manage_config():\n # The UI prompt (\"prevent\") is the opposite of the setting (\"allow\"):\n submission_preferences_form = SubmissionPreferencesForm(\n prevent_document_uploads=not current_app.instance_config.allow_document_uploads)\n logo_form = LogoForm()\n if logo_form.validate_on_submit():\n f = logo_form.logo.data\n custom_logo_filepath = os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')\n try:\n f.save(custom_logo_filepath)\n flash(gettext(\"Image updated.\"), \"logo-success\")\n except Exception:\n flash(\"Unable to process the image file.\"\n \" Try another one.\", \"logo-error\")\n finally:\n return redirect(url_for(\"admin.manage_config\"))\n else:\n for field, errors in list(logo_form.errors.items()):\n for error in errors:\n flash(error, \"logo-error\")\n return render_template(\"config.html\",\n submission_preferences_form=submission_preferences_form,\n logo_form=logo_form)\n\n @view.route('/update-submission-preferences', methods=['POST'])\n @admin_required\n def update_submission_preferences():\n form = SubmissionPreferencesForm()\n if form.validate_on_submit():\n # The UI prompt (\"prevent\") is the opposite of the setting (\"allow\"):\n flash(gettext(\"Preferences saved.\"), \"submission-preferences-success\")\n value = not bool(request.form.get('prevent_document_uploads'))\n InstanceConfig.set('allow_document_uploads', value)\n return redirect(url_for('admin.manage_config'))\n\n @view.route('/add', methods=('GET', 'POST'))\n @admin_required\n def add_user():\n 
form = NewUserForm()\n if form.validate_on_submit():\n form_valid = True\n username = request.form['username']\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n password = request.form['password']\n is_admin = bool(request.form.get('is_admin'))\n\n try:\n otp_secret = None\n if request.form.get('is_hotp', False):\n otp_secret = request.form.get('otp_secret', '')\n new_user = Journalist(username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n is_admin=is_admin,\n otp_secret=otp_secret)\n db.session.add(new_user)\n db.session.commit()\n except PasswordError:\n flash(gettext(\n 'There was an error with the autogenerated password. '\n 'User not created. Please try again.'), 'error')\n form_valid = False\n except InvalidUsernameException as e:\n form_valid = False\n flash('Invalid username: ' + str(e), \"error\")\n except IntegrityError as e:\n db.session.rollback()\n form_valid = False\n if \"UNIQUE constraint failed: journalists.username\" in str(e):\n flash(gettext('Username \"{user}\" already taken.'.format(\n user=username)), \"error\")\n else:\n flash(gettext(\"An error occurred saving this user\"\n \" to the database.\"\n \" Please inform your admin.\"),\n \"error\")\n current_app.logger.error(\"Adding user \"\n \"'{}' failed: {}\".format(\n username, e))\n\n if form_valid:\n return redirect(url_for('admin.new_user_two_factor',\n uid=new_user.id))\n\n return render_template(\"admin_add_user.html\",\n password=make_password(config),\n form=form)\n\n @view.route('/2fa', methods=('GET', 'POST'))\n @admin_required\n def new_user_two_factor():\n user = Journalist.query.get(request.args['uid'])\n\n if request.method == 'POST':\n token = request.form['token']\n if user.verify_token(token):\n flash(gettext(\n \"The two-factor code for user \\\"{user}\\\" was verified \"\n \"successfully.\").format(user=user.username),\n \"notification\")\n return redirect(url_for(\"admin.index\"))\n else:\n flash(gettext(\n \"There was a problem verifying the two-factor code. 
Please try again.\"),\n \"error\")\n\n return render_template(\"admin_new_user_two_factor.html\", user=user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n @admin_required\n def reset_two_factor_totp():\n uid = request.form['uid']\n user = Journalist.query.get(uid)\n user.is_totp = True\n user.regenerate_totp_shared_secret()\n db.session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n @admin_required\n def reset_two_factor_hotp():\n uid = request.form['uid']\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n user = Journalist.query.get(uid)\n if not validate_hotp_secret(user, otp_secret):\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n db.session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n else:\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n\n @view.route('/edit/<int:user_id>', methods=('GET', 'POST'))\n @admin_required\n def edit_user(user_id):\n user = Journalist.query.get(user_id)\n\n if request.method == 'POST':\n if request.form.get('username', None):\n new_username = request.form['username']\n\n try:\n Journalist.check_username_acceptable(new_username)\n except InvalidUsernameException as e:\n flash('Invalid username: ' + str(e), 'error')\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n\n if new_username == user.username:\n pass\n elif Journalist.query.filter_by(\n username=new_username).one_or_none():\n flash(gettext(\n 'Username \"{user}\" already taken.').format(\n user=new_username),\n \"error\")\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n else:\n user.username = new_username\n\n try:\n first_name = request.form['first_name']\n Journalist.check_name_acceptable(first_name)\n user.first_name = first_name\n except FirstOrLastNameError as e:\n flash(gettext('Name not updated: {}'.format(e)), \"error\")\n return redirect(url_for(\"admin.edit_user\", user_id=user_id))\n\n try:\n last_name = request.form['last_name']\n Journalist.check_name_acceptable(last_name)\n user.last_name = last_name\n except FirstOrLastNameError as e:\n flash(gettext('Name not updated: {}'.format(e)), \"error\")\n return redirect(url_for(\"admin.edit_user\", user_id=user_id))\n\n user.is_admin = bool(request.form.get('is_admin'))\n\n commit_account_changes(user)\n\n password = make_password(config)\n return render_template(\"edit_account.html\", user=user,\n password=password)\n\n @view.route('/delete/<int:user_id>', methods=('POST',))\n @admin_required\n def delete_user(user_id):\n user = Journalist.query.get(user_id)\n if user_id == g.user.id:\n # Do not flash because the interface already has safe guards.\n # It can only happen by manually crafting a POST request\n current_app.logger.error(\n \"Admin {} tried to delete itself\".format(g.user.username))\n abort(403)\n elif user:\n db.session.delete(user)\n db.session.commit()\n flash(gettext(\"Deleted user '{user}'.\").format(\n user=user.username), \"notification\")\n else:\n current_app.logger.error(\n \"Admin {} tried to delete nonexistent user with pk={}\".format(\n g.user.username, user_id))\n abort(404)\n\n return redirect(url_for('admin.index'))\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def new_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n if set_diceware_password(user, password) is 
not False:\n if user.last_token is not None:\n revoke_token(user, user.last_token)\n user.session_nonce += 1\n db.session.commit()\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/ossec-test')\n @admin_required\n def ossec_test():\n current_app.logger.error('This is a test OSSEC alert')\n flash(gettext('Test alert sent. Please check your email.'),\n 'notification')\n return redirect(url_for('admin.manage_config'))\n\n return view\n", "path": "securedrop/journalist_app/admin.py"}]}
| 3,315 | 251 |
gh_patches_debug_9935
|
rasdani/github-patches
|
git_diff
|
mosaicml__composer-850
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tutorial url for CallbackHparams doesn't exist
The url pointed to in https://github.com/mosaicml/composer/blob/dev/composer/callbacks/callback_hparams.py#L35 doesn't exist: https://docs.mosaicml.com/en/latest/tutorials/adding_models_datasets.html#trainer-with-yahp
(I'd like to add a callback...)
--- END ISSUE ---
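Since the issue ends with wanting to add a callback, here is a hedged sketch of the wiring the file below already demonstrates: subclass `CallbackHparams`, declare fields with `hp.optional`, and build the callback in `initialize_object()`. Only the base classes and the `yahp` helpers come from the listing; `MyCallback`, `MyCallbackHparams` and the `verbose` field are made up for illustration, and registering the new hparams class with the trainer is a separate step not shown here.

```python
# Illustrative only: mirrors the GradMonitorHparams pattern from the file below.
from dataclasses import dataclass

import yahp as hp

from composer.callbacks.callback_hparams import CallbackHparams
from composer.core.callback import Callback


class MyCallback(Callback):
    """A do-nothing callback used only to show the wiring."""

    def __init__(self, verbose: bool = False) -> None:
        self.verbose = verbose


@dataclass
class MyCallbackHparams(CallbackHparams):
    verbose: bool = hp.optional(doc="Whether to log extra detail.", default=False)

    def initialize_object(self) -> MyCallback:
        return MyCallback(verbose=self.verbose)
```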
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/callbacks/callback_hparams.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 """Hyperparameters for callbacks."""
4 from __future__ import annotations
5
6 import abc
7 import textwrap
8 from dataclasses import dataclass
9 from typing import Optional
10
11 import yahp as hp
12
13 from composer.callbacks.checkpoint_saver import CheckpointSaver
14 from composer.callbacks.grad_monitor import GradMonitor
15 from composer.callbacks.lr_monitor import LRMonitor
16 from composer.callbacks.memory_monitor import MemoryMonitor
17 from composer.callbacks.speed_monitor import SpeedMonitor
18 from composer.core.callback import Callback
19 from composer.core.time import Time
20 from composer.utils import import_object
21
22 __all__ = [
23 "CallbackHparams",
24 "GradMonitorHparams",
25 "MemoryMonitorHparams",
26 "LRMonitorHparams",
27 "SpeedMonitorHparams",
28 "CheckpointSaverHparams",
29 ]
30
31
32 @dataclass
33 class CallbackHparams(hp.Hparams, abc.ABC):
34 """Base class for callback hyperparameters.
35
36 Callback parameters that are added to the callbacks argument of
37 :attr:`~composer.trainer.trainer_hparams.TrainerHparams` (e.g., via YAML or the CLI). See `Trainer with YAHP <https://docs.mosaicml.com/en/latest/tutorials/adding_models_datasets.html#trainer-with-yahp>`_ for more details.
38 These are initialized in the training loop.
39 """
40
41 @abc.abstractmethod
42 def initialize_object(self) -> Callback:
43 """Initialize the callback.
44
45 Returns:
46 Callback: An instance of the callback.
47 """
48 pass
49
50
51 @dataclass
52 class GradMonitorHparams(CallbackHparams):
53 """:class:`~.GradMonitor` hyperparamters.
54
55 Args:
56 log_layer_grad_norms (bool, optional):
57 See :class:`~.GradMonitor` for documentation.
58 """
59
60 log_layer_grad_norms: bool = hp.optional(
61 doc="Whether to log gradient norms for individual layers.",
62 default=False,
63 )
64
65 def initialize_object(self) -> GradMonitor:
66 """Initialize the GradMonitor callback.
67
68 Returns:
69 GradMonitor: An instance of :class:`~.GradMonitor`.
70 """
71 return GradMonitor(log_layer_grad_norms=self.log_layer_grad_norms)
72
73
74 @dataclass
75 class MemoryMonitorHparams(CallbackHparams):
76 """:class:`~.MemoryMonitor` hyperparameters.
77
78 There are no parameters as :class:`~.MemoryMonitor` does not take any parameters.
79 """
80
81 def initialize_object(self) -> MemoryMonitor:
82 """Initialize the MemoryMonitor callback.
83
84 Returns:
85 MemoryMonitor: An instance of :class:`~.MemoryMonitor`.
86 """
87 return MemoryMonitor()
88
89
90 @dataclass
91 class LRMonitorHparams(CallbackHparams):
92 """:class:`~.LRMonitor` hyperparameters.
93
94 There are no parameters as :class:`~.LRMonitor` does not take any parameters.
95 """
96
97 def initialize_object(self) -> LRMonitor:
98 """Initialize the LRMonitor callback.
99
100 Returns:
101 LRMonitor: An instance of :class:`~.LRMonitor`.
102 """
103 return LRMonitor()
104
105
106 @dataclass
107 class SpeedMonitorHparams(CallbackHparams):
108 """:class:`~.SpeedMonitor` hyperparameters.
109
110 Args:
111 window_size (int, optional): See :class:`~.SpeedMonitor` for documentation.
112 """
113 window_size: int = hp.optional(
114 doc="Number of batchs to use for a rolling average of throughput.",
115 default=100,
116 )
117
118 def initialize_object(self) -> SpeedMonitor:
119 """Initialize the SpeedMonitor callback.
120
121 Returns:
122 SpeedMonitor: An instance of :class:`~.SpeedMonitor`.
123 """
124 return SpeedMonitor(window_size=self.window_size)
125
126
127 @dataclass
128 class CheckpointSaverHparams(CallbackHparams):
129 """:class:`~.CheckpointSaver` hyperparameters.
130
131 Args:
132 save_folder (str, optional): See :class:`~.CheckpointSaver`.
133 filename (str, optional): See :class:`~.CheckpointSaver`.
134 artifact_name (str, optional): See :class:`~.CheckpointSaver`.
135 latest_filename (str, optional): See :class:`~.CheckpointSaver`.
136 overwrite (str, optional): See :class:`~.CheckpointSaver`.
137 weights_only (bool, optional): See :class:`~.CheckpointSaver`.
138 num_checkpoints_to_keep (int, optional): See :class:`~.CheckpointSaver`.
139
140 save_interval (str, optional): Either a :doc:`time-string </trainer/time>` or a path to a function.
141
142 If a :doc:`time-string </trainer/time>`, checkpoints will be saved according to this interval.
143
144 If a path to a function, it should be of the format ``'path.to.function:function_name'``. The function
145 should take (:class:`~.State`, :class:`~.Event`) and return a
146 boolean indicating whether a checkpoint should be saved given the current state and event. The event will
147 be either :attr:`~composer.core.event.Event.BATCH_CHECKPOINT` or
148 :attr:`~composer.core.event.Event.EPOCH_CHECKPOINT`.
149 """
150 save_folder: str = hp.optional(doc="Folder where checkpoints will be saved.", default="{run_name}/checkpoints")
151 filename: str = hp.optional("Checkpoint name format string.", default="ep{epoch}-ba{batch}-rank{rank}")
152 artifact_name: str = hp.optional("Checkpoint artifact name format string.",
153 default="{run_name}/checkpoints/ep{epoch}-ba{batch}-rank{rank}")
154 latest_filename: Optional[str] = hp.optional("Latest checkpoint symlink format string.",
155 default="latest-rank{rank}")
156 overwrite: bool = hp.optional("Whether to override existing checkpoints.", default=False)
157 weights_only: bool = hp.optional("Whether to save only checkpoint weights", default=False)
158 save_interval: str = hp.optional(textwrap.dedent("""\
159 Checkpoint interval or path to a `(State, Event) -> bool` function
160 returning whether a checkpoint should be saved."""),
161 default="1ep")
162 num_checkpoints_to_keep: int = hp.optional(
163 "Number of checkpoints to persist locally. Set to -1 to never delete checkpoints.",
164 default=-1,
165 )
166
167 def initialize_object(self) -> CheckpointSaver:
168 try:
169 save_interval = Time.from_timestring(self.save_interval)
170 except ValueError:
171 # assume it is a function path
172 save_interval = import_object(self.save_interval)
173 return CheckpointSaver(
174 folder=self.save_folder,
175 filename=self.filename,
176 artifact_name=self.artifact_name,
177 latest_filename=self.latest_filename,
178 overwrite=self.overwrite,
179 save_interval=save_interval,
180 weights_only=self.weights_only,
181 num_checkpoints_to_keep=self.num_checkpoints_to_keep,
182 )
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/composer/callbacks/callback_hparams.py b/composer/callbacks/callback_hparams.py
--- a/composer/callbacks/callback_hparams.py
+++ b/composer/callbacks/callback_hparams.py
@@ -31,12 +31,7 @@
@dataclass
class CallbackHparams(hp.Hparams, abc.ABC):
- """Base class for callback hyperparameters.
-
- Callback parameters that are added to the callbacks argument of
- :attr:`~composer.trainer.trainer_hparams.TrainerHparams` (e.g., via YAML or the CLI). See `Trainer with YAHP <https://docs.mosaicml.com/en/latest/tutorials/adding_models_datasets.html#trainer-with-yahp>`_ for more details.
- These are initialized in the training loop.
- """
+ """Base class for Callback hyperparameters."""
@abc.abstractmethod
def initialize_object(self) -> Callback:
|
{"golden_diff": "diff --git a/composer/callbacks/callback_hparams.py b/composer/callbacks/callback_hparams.py\n--- a/composer/callbacks/callback_hparams.py\n+++ b/composer/callbacks/callback_hparams.py\n@@ -31,12 +31,7 @@\n \n @dataclass\n class CallbackHparams(hp.Hparams, abc.ABC):\n- \"\"\"Base class for callback hyperparameters.\n-\n- Callback parameters that are added to the callbacks argument of\n- :attr:`~composer.trainer.trainer_hparams.TrainerHparams` (e.g., via YAML or the CLI). See `Trainer with YAHP <https://docs.mosaicml.com/en/latest/tutorials/adding_models_datasets.html#trainer-with-yahp>`_ for more details.\n- These are initialized in the training loop.\n- \"\"\"\n+ \"\"\"Base class for Callback hyperparameters.\"\"\"\n \n @abc.abstractmethod\n def initialize_object(self) -> Callback:\n", "issue": "tutorial url for CallbackHparams doesn't exist\nThe url pointed to in https://github.com/mosaicml/composer/blob/dev/composer/callbacks/callback_hparams.py#L35 doesn't exist: https://docs.mosaicml.com/en/latest/tutorials/adding_models_datasets.html#trainer-with-yahp\r\n\r\n(I'd like to add a callback...)\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"Hyperparameters for callbacks.\"\"\"\nfrom __future__ import annotations\n\nimport abc\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport yahp as hp\n\nfrom composer.callbacks.checkpoint_saver import CheckpointSaver\nfrom composer.callbacks.grad_monitor import GradMonitor\nfrom composer.callbacks.lr_monitor import LRMonitor\nfrom composer.callbacks.memory_monitor import MemoryMonitor\nfrom composer.callbacks.speed_monitor import SpeedMonitor\nfrom composer.core.callback import Callback\nfrom composer.core.time import Time\nfrom composer.utils import import_object\n\n__all__ = [\n \"CallbackHparams\",\n \"GradMonitorHparams\",\n \"MemoryMonitorHparams\",\n \"LRMonitorHparams\",\n \"SpeedMonitorHparams\",\n \"CheckpointSaverHparams\",\n]\n\n\n@dataclass\nclass CallbackHparams(hp.Hparams, abc.ABC):\n \"\"\"Base class for callback hyperparameters.\n\n Callback parameters that are added to the callbacks argument of\n :attr:`~composer.trainer.trainer_hparams.TrainerHparams` (e.g., via YAML or the CLI). 
See `Trainer with YAHP <https://docs.mosaicml.com/en/latest/tutorials/adding_models_datasets.html#trainer-with-yahp>`_ for more details.\n These are initialized in the training loop.\n \"\"\"\n\n @abc.abstractmethod\n def initialize_object(self) -> Callback:\n \"\"\"Initialize the callback.\n\n Returns:\n Callback: An instance of the callback.\n \"\"\"\n pass\n\n\n@dataclass\nclass GradMonitorHparams(CallbackHparams):\n \"\"\":class:`~.GradMonitor` hyperparamters.\n\n Args:\n log_layer_grad_norms (bool, optional): \n See :class:`~.GradMonitor` for documentation.\n \"\"\"\n\n log_layer_grad_norms: bool = hp.optional(\n doc=\"Whether to log gradient norms for individual layers.\",\n default=False,\n )\n\n def initialize_object(self) -> GradMonitor:\n \"\"\"Initialize the GradMonitor callback.\n\n Returns:\n GradMonitor: An instance of :class:`~.GradMonitor`.\n \"\"\"\n return GradMonitor(log_layer_grad_norms=self.log_layer_grad_norms)\n\n\n@dataclass\nclass MemoryMonitorHparams(CallbackHparams):\n \"\"\":class:`~.MemoryMonitor` hyperparameters.\n\n There are no parameters as :class:`~.MemoryMonitor` does not take any parameters.\n \"\"\"\n\n def initialize_object(self) -> MemoryMonitor:\n \"\"\"Initialize the MemoryMonitor callback.\n\n Returns:\n MemoryMonitor: An instance of :class:`~.MemoryMonitor`.\n \"\"\"\n return MemoryMonitor()\n\n\n@dataclass\nclass LRMonitorHparams(CallbackHparams):\n \"\"\":class:`~.LRMonitor` hyperparameters.\n\n There are no parameters as :class:`~.LRMonitor` does not take any parameters.\n \"\"\"\n\n def initialize_object(self) -> LRMonitor:\n \"\"\"Initialize the LRMonitor callback.\n\n Returns:\n LRMonitor: An instance of :class:`~.LRMonitor`.\n \"\"\"\n return LRMonitor()\n\n\n@dataclass\nclass SpeedMonitorHparams(CallbackHparams):\n \"\"\":class:`~.SpeedMonitor` hyperparameters.\n\n Args:\n window_size (int, optional): See :class:`~.SpeedMonitor` for documentation.\n \"\"\"\n window_size: int = hp.optional(\n doc=\"Number of batchs to use for a rolling average of throughput.\",\n default=100,\n )\n\n def initialize_object(self) -> SpeedMonitor:\n \"\"\"Initialize the SpeedMonitor callback.\n\n Returns:\n SpeedMonitor: An instance of :class:`~.SpeedMonitor`.\n \"\"\"\n return SpeedMonitor(window_size=self.window_size)\n\n\n@dataclass\nclass CheckpointSaverHparams(CallbackHparams):\n \"\"\":class:`~.CheckpointSaver` hyperparameters.\n \n Args:\n save_folder (str, optional): See :class:`~.CheckpointSaver`.\n filename (str, optional): See :class:`~.CheckpointSaver`.\n artifact_name (str, optional): See :class:`~.CheckpointSaver`.\n latest_filename (str, optional): See :class:`~.CheckpointSaver`.\n overwrite (str, optional): See :class:`~.CheckpointSaver`.\n weights_only (bool, optional): See :class:`~.CheckpointSaver`.\n num_checkpoints_to_keep (int, optional): See :class:`~.CheckpointSaver`.\n\n save_interval (str, optional): Either a :doc:`time-string </trainer/time>` or a path to a function.\n\n If a :doc:`time-string </trainer/time>`, checkpoints will be saved according to this interval.\n\n If a path to a function, it should be of the format ``'path.to.function:function_name'``. The function\n should take (:class:`~.State`, :class:`~.Event`) and return a\n boolean indicating whether a checkpoint should be saved given the current state and event. 
The event will\n be either :attr:`~composer.core.event.Event.BATCH_CHECKPOINT` or\n :attr:`~composer.core.event.Event.EPOCH_CHECKPOINT`.\n \"\"\"\n save_folder: str = hp.optional(doc=\"Folder where checkpoints will be saved.\", default=\"{run_name}/checkpoints\")\n filename: str = hp.optional(\"Checkpoint name format string.\", default=\"ep{epoch}-ba{batch}-rank{rank}\")\n artifact_name: str = hp.optional(\"Checkpoint artifact name format string.\",\n default=\"{run_name}/checkpoints/ep{epoch}-ba{batch}-rank{rank}\")\n latest_filename: Optional[str] = hp.optional(\"Latest checkpoint symlink format string.\",\n default=\"latest-rank{rank}\")\n overwrite: bool = hp.optional(\"Whether to override existing checkpoints.\", default=False)\n weights_only: bool = hp.optional(\"Whether to save only checkpoint weights\", default=False)\n save_interval: str = hp.optional(textwrap.dedent(\"\"\"\\\n Checkpoint interval or path to a `(State, Event) -> bool` function\n returning whether a checkpoint should be saved.\"\"\"),\n default=\"1ep\")\n num_checkpoints_to_keep: int = hp.optional(\n \"Number of checkpoints to persist locally. Set to -1 to never delete checkpoints.\",\n default=-1,\n )\n\n def initialize_object(self) -> CheckpointSaver:\n try:\n save_interval = Time.from_timestring(self.save_interval)\n except ValueError:\n # assume it is a function path\n save_interval = import_object(self.save_interval)\n return CheckpointSaver(\n folder=self.save_folder,\n filename=self.filename,\n artifact_name=self.artifact_name,\n latest_filename=self.latest_filename,\n overwrite=self.overwrite,\n save_interval=save_interval,\n weights_only=self.weights_only,\n num_checkpoints_to_keep=self.num_checkpoints_to_keep,\n )\n", "path": "composer/callbacks/callback_hparams.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. 
All Rights Reserved.\n\n\"\"\"Hyperparameters for callbacks.\"\"\"\nfrom __future__ import annotations\n\nimport abc\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport yahp as hp\n\nfrom composer.callbacks.checkpoint_saver import CheckpointSaver\nfrom composer.callbacks.grad_monitor import GradMonitor\nfrom composer.callbacks.lr_monitor import LRMonitor\nfrom composer.callbacks.memory_monitor import MemoryMonitor\nfrom composer.callbacks.speed_monitor import SpeedMonitor\nfrom composer.core.callback import Callback\nfrom composer.core.time import Time\nfrom composer.utils import import_object\n\n__all__ = [\n \"CallbackHparams\",\n \"GradMonitorHparams\",\n \"MemoryMonitorHparams\",\n \"LRMonitorHparams\",\n \"SpeedMonitorHparams\",\n \"CheckpointSaverHparams\",\n]\n\n\n@dataclass\nclass CallbackHparams(hp.Hparams, abc.ABC):\n \"\"\"Base class for Callback hyperparameters.\"\"\"\n\n @abc.abstractmethod\n def initialize_object(self) -> Callback:\n \"\"\"Initialize the callback.\n\n Returns:\n Callback: An instance of the callback.\n \"\"\"\n pass\n\n\n@dataclass\nclass GradMonitorHparams(CallbackHparams):\n \"\"\":class:`~.GradMonitor` hyperparamters.\n\n Args:\n log_layer_grad_norms (bool, optional): \n See :class:`~.GradMonitor` for documentation.\n \"\"\"\n\n log_layer_grad_norms: bool = hp.optional(\n doc=\"Whether to log gradient norms for individual layers.\",\n default=False,\n )\n\n def initialize_object(self) -> GradMonitor:\n \"\"\"Initialize the GradMonitor callback.\n\n Returns:\n GradMonitor: An instance of :class:`~.GradMonitor`.\n \"\"\"\n return GradMonitor(log_layer_grad_norms=self.log_layer_grad_norms)\n\n\n@dataclass\nclass MemoryMonitorHparams(CallbackHparams):\n \"\"\":class:`~.MemoryMonitor` hyperparameters.\n\n There are no parameters as :class:`~.MemoryMonitor` does not take any parameters.\n \"\"\"\n\n def initialize_object(self) -> MemoryMonitor:\n \"\"\"Initialize the MemoryMonitor callback.\n\n Returns:\n MemoryMonitor: An instance of :class:`~.MemoryMonitor`.\n \"\"\"\n return MemoryMonitor()\n\n\n@dataclass\nclass LRMonitorHparams(CallbackHparams):\n \"\"\":class:`~.LRMonitor` hyperparameters.\n\n There are no parameters as :class:`~.LRMonitor` does not take any parameters.\n \"\"\"\n\n def initialize_object(self) -> LRMonitor:\n \"\"\"Initialize the LRMonitor callback.\n\n Returns:\n LRMonitor: An instance of :class:`~.LRMonitor`.\n \"\"\"\n return LRMonitor()\n\n\n@dataclass\nclass SpeedMonitorHparams(CallbackHparams):\n \"\"\":class:`~.SpeedMonitor` hyperparameters.\n\n Args:\n window_size (int, optional): See :class:`~.SpeedMonitor` for documentation.\n \"\"\"\n window_size: int = hp.optional(\n doc=\"Number of batchs to use for a rolling average of throughput.\",\n default=100,\n )\n\n def initialize_object(self) -> SpeedMonitor:\n \"\"\"Initialize the SpeedMonitor callback.\n\n Returns:\n SpeedMonitor: An instance of :class:`~.SpeedMonitor`.\n \"\"\"\n return SpeedMonitor(window_size=self.window_size)\n\n\n@dataclass\nclass CheckpointSaverHparams(CallbackHparams):\n \"\"\":class:`~.CheckpointSaver` hyperparameters.\n \n Args:\n save_folder (str, optional): See :class:`~.CheckpointSaver`.\n filename (str, optional): See :class:`~.CheckpointSaver`.\n artifact_name (str, optional): See :class:`~.CheckpointSaver`.\n latest_filename (str, optional): See :class:`~.CheckpointSaver`.\n overwrite (str, optional): See :class:`~.CheckpointSaver`.\n weights_only (bool, optional): See :class:`~.CheckpointSaver`.\n 
num_checkpoints_to_keep (int, optional): See :class:`~.CheckpointSaver`.\n\n save_interval (str, optional): Either a :doc:`time-string </trainer/time>` or a path to a function.\n\n If a :doc:`time-string </trainer/time>`, checkpoints will be saved according to this interval.\n\n If a path to a function, it should be of the format ``'path.to.function:function_name'``. The function\n should take (:class:`~.State`, :class:`~.Event`) and return a\n boolean indicating whether a checkpoint should be saved given the current state and event. The event will\n be either :attr:`~composer.core.event.Event.BATCH_CHECKPOINT` or\n :attr:`~composer.core.event.Event.EPOCH_CHECKPOINT`.\n \"\"\"\n save_folder: str = hp.optional(doc=\"Folder where checkpoints will be saved.\", default=\"{run_name}/checkpoints\")\n filename: str = hp.optional(\"Checkpoint name format string.\", default=\"ep{epoch}-ba{batch}-rank{rank}\")\n artifact_name: str = hp.optional(\"Checkpoint artifact name format string.\",\n default=\"{run_name}/checkpoints/ep{epoch}-ba{batch}-rank{rank}\")\n latest_filename: Optional[str] = hp.optional(\"Latest checkpoint symlink format string.\",\n default=\"latest-rank{rank}\")\n overwrite: bool = hp.optional(\"Whether to override existing checkpoints.\", default=False)\n weights_only: bool = hp.optional(\"Whether to save only checkpoint weights\", default=False)\n save_interval: str = hp.optional(textwrap.dedent(\"\"\"\\\n Checkpoint interval or path to a `(State, Event) -> bool` function\n returning whether a checkpoint should be saved.\"\"\"),\n default=\"1ep\")\n num_checkpoints_to_keep: int = hp.optional(\n \"Number of checkpoints to persist locally. Set to -1 to never delete checkpoints.\",\n default=-1,\n )\n\n def initialize_object(self) -> CheckpointSaver:\n try:\n save_interval = Time.from_timestring(self.save_interval)\n except ValueError:\n # assume it is a function path\n save_interval = import_object(self.save_interval)\n return CheckpointSaver(\n folder=self.save_folder,\n filename=self.filename,\n artifact_name=self.artifact_name,\n latest_filename=self.latest_filename,\n overwrite=self.overwrite,\n save_interval=save_interval,\n weights_only=self.weights_only,\n num_checkpoints_to_keep=self.num_checkpoints_to_keep,\n )\n", "path": "composer/callbacks/callback_hparams.py"}]}
| 2,235 | 201 |
gh_patches_debug_3588
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3753
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show only relevant updates in typeahead on Akvo pages
Currently, all updates can be searched for in the partner site updates typeahead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/typeahead.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4 See more details in the license.txt file located at the root folder of the
5 Akvo RSR module. For additional details on the GNU license please
6 see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from django.conf import settings
10 from rest_framework.decorators import api_view
11 from rest_framework.response import Response
12
13 from akvo.codelists.models import Country, Version
14 from akvo.rest.serializers import (TypeaheadCountrySerializer,
15 TypeaheadOrganisationSerializer,
16 TypeaheadProjectSerializer,
17 TypeaheadProjectUpdateSerializer,
18 TypeaheadKeywordSerializer,)
19 from akvo.rsr.models import Organisation, Project, ProjectUpdate
20 from akvo.rsr.views.project import _project_directory_coll
21
22
23 def rejig(queryset, serializer):
24 """Rearrange & add queryset count to the response data."""
25 return {
26 'count': queryset.count(),
27 'results': serializer.data
28 }
29
30
31 @api_view(['GET'])
32 def typeahead_country(request):
33 iati_version = Version.objects.get(code=settings.IATI_VERSION)
34 countries = Country.objects.filter(version=iati_version)
35 return Response(
36 rejig(countries, TypeaheadCountrySerializer(countries, many=True))
37 )
38
39
40 @api_view(['GET'])
41 def typeahead_organisation(request):
42 page = request.rsr_page
43 if request.GET.get('partners', '0') == '1' and page:
44 organisations = page.partners()
45 else:
46 # Project editor - all organizations
47 organisations = Organisation.objects.all()
48
49 organisations = organisations.values('id', 'name', 'long_name')
50
51 return Response(
52 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
53 many=True))
54 )
55
56
57 @api_view(['GET'])
58 def typeahead_user_organisations(request):
59 user = request.user
60 is_admin = user.is_active and (user.is_superuser or user.is_admin)
61 organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()
62 return Response(
63 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
64 many=True))
65 )
66
67
68 @api_view(['GET'])
69 def typeahead_keyword(request):
70 page = request.rsr_page
71 keywords = page.keywords.all() if page else None
72 if keywords:
73 return Response(
74 rejig(keywords, TypeaheadKeywordSerializer(keywords, many=True))
75 )
76 # No keywords on rsr.akvo.org
77 return Response({})
78
79
80 @api_view(['GET'])
81 def typeahead_project(request):
82 """Return the typeaheads for projects.
83
84 Without any query parameters, it returns the info for all the projects in
85 the current context -- changes depending on whether we are on a partner
86 site, or the RSR site.
87
88 If a published query parameter is passed, only projects that have been
89 published are returned.
90
91 NOTE: The unauthenticated user gets information about all the projects when
92 using this API endpoint. More permission checking will need to be added,
93 if the amount of data being returned is changed.
94
95 """
96 if request.GET.get('published', '0') == '0':
97 # Project editor - organization projects, all
98 page = request.rsr_page
99 projects = page.all_projects() if page else Project.objects.all()
100 else:
101 # Search bar - organization projects, published
102 projects = _project_directory_coll(request)
103
104 projects = projects.exclude(title='')
105 return Response(
106 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
107 )
108
109
110 @api_view(['GET'])
111 def typeahead_user_projects(request):
112 user = request.user
113 is_admin = user.is_active and (user.is_superuser or user.is_admin)
114 if is_admin:
115 projects = Project.objects.all()
116 else:
117 projects = user.approved_organisations().all_projects()
118 projects = projects.exclude(title='')
119 return Response(
120 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
121 )
122
123
124 @api_view(['GET'])
125 def typeahead_impact_projects(request):
126 user = request.user
127 projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
128 projects = projects.published().filter(is_impact_project=True).order_by('title')
129
130 return Response(
131 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
132 )
133
134
135 @api_view(['GET'])
136 def typeahead_projectupdate(request):
137 updates = ProjectUpdate.objects.all()
138 return Response(
139 rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))
140 )
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py
--- a/akvo/rest/views/typeahead.py
+++ b/akvo/rest/views/typeahead.py
@@ -134,7 +134,8 @@
@api_view(['GET'])
def typeahead_projectupdate(request):
- updates = ProjectUpdate.objects.all()
+ page = request.rsr_page
+ updates = page.updates() if page else ProjectUpdate.objects.all()
return Response(
rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))
)
|
{"golden_diff": "diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py\n--- a/akvo/rest/views/typeahead.py\n+++ b/akvo/rest/views/typeahead.py\n@@ -134,7 +134,8 @@\n \n @api_view(['GET'])\n def typeahead_projectupdate(request):\n- updates = ProjectUpdate.objects.all()\n+ page = request.rsr_page\n+ updates = page.updates() if page else ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "issue": "Show only relevant updates in typeahead on Akvo pages\nCurrently, all updates can be searched for on partner site updates typeahead. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.conf import settings\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.codelists.models import Country, Version\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer,\n TypeaheadKeywordSerializer,)\nfrom akvo.rsr.models import Organisation, Project, ProjectUpdate\nfrom akvo.rsr.views.project import _project_directory_coll\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n iati_version = Version.objects.get(code=settings.IATI_VERSION)\n countries = Country.objects.filter(version=iati_version)\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n page = request.rsr_page\n if request.GET.get('partners', '0') == '1' and page:\n organisations = page.partners()\n else:\n # Project editor - all organizations\n organisations = Organisation.objects.all()\n\n organisations = organisations.values('id', 'name', 'long_name')\n\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_keyword(request):\n page = request.rsr_page\n keywords = page.keywords.all() if page else None\n if keywords:\n return Response(\n rejig(keywords, TypeaheadKeywordSerializer(keywords, many=True))\n )\n # No keywords on rsr.akvo.org\n return Response({})\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n \"\"\"Return the typeaheads for projects.\n\n Without any query parameters, it returns the info for all the projects in\n the current context -- changes depending on whether we are on a partner\n site, or the RSR site.\n\n If a published query parameter is passed, only projects that have been\n published are returned.\n\n NOTE: The unauthenticated user gets information about all the projects when\n using this API endpoint. 
More permission checking will need to be added,\n if the amount of data being returned is changed.\n\n \"\"\"\n if request.GET.get('published', '0') == '0':\n # Project editor - organization projects, all\n page = request.rsr_page\n projects = page.all_projects() if page else Project.objects.all()\n else:\n # Search bar - organization projects, published\n projects = _project_directory_coll(request)\n\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = projects.published().filter(is_impact_project=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.conf import settings\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.codelists.models import Country, Version\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer,\n TypeaheadKeywordSerializer,)\nfrom akvo.rsr.models import Organisation, Project, ProjectUpdate\nfrom akvo.rsr.views.project import _project_directory_coll\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n iati_version = Version.objects.get(code=settings.IATI_VERSION)\n countries = Country.objects.filter(version=iati_version)\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n page = request.rsr_page\n if request.GET.get('partners', '0') == '1' and page:\n organisations = page.partners()\n else:\n # Project editor - all organizations\n organisations = Organisation.objects.all()\n\n organisations = organisations.values('id', 'name', 'long_name')\n\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n 
rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_keyword(request):\n page = request.rsr_page\n keywords = page.keywords.all() if page else None\n if keywords:\n return Response(\n rejig(keywords, TypeaheadKeywordSerializer(keywords, many=True))\n )\n # No keywords on rsr.akvo.org\n return Response({})\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n \"\"\"Return the typeaheads for projects.\n\n Without any query parameters, it returns the info for all the projects in\n the current context -- changes depending on whether we are on a partner\n site, or the RSR site.\n\n If a published query parameter is passed, only projects that have been\n published are returned.\n\n NOTE: The unauthenticated user gets information about all the projects when\n using this API endpoint. More permission checking will need to be added,\n if the amount of data being returned is changed.\n\n \"\"\"\n if request.GET.get('published', '0') == '0':\n # Project editor - organization projects, all\n page = request.rsr_page\n projects = page.all_projects() if page else Project.objects.all()\n else:\n # Search bar - organization projects, published\n projects = _project_directory_coll(request)\n\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = projects.published().filter(is_impact_project=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n page = request.rsr_page\n updates = page.updates() if page else ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}]}
| 1,605 | 129 |
gh_patches_debug_31114
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-2154
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_174 incorrect reporting
CKV_AWS_174 is being triggered in our terraform code even though we have the viewer certificate set to use TLSv1.2. Snippet of our code here:
viewer_certificate {
  acm_certificate_arn      = aws_acm_certificate.cert.arn
  ssl_support_method       = "sni-only"
  minimum_protocol_version = "TLSv1.2_2019"
}
Steps to reproduce the behavior:
Running checkov on our terraform code
**Expected behavior**
This check should pass
**Additional context**
It looks to me like the issue is in the code between lines 17 and 19. I don't think, based on the terraform documentation and the if statements, that it would ever pass if using an ACM certificate.
https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/CloudfrontTLS12.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class CloudFrontTLS12(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Verify CloudFront Distribution Viewer Certificate is using TLS v1.2"
8 id = "CKV_AWS_174"
9 supported_resources = ["aws_cloudfront_distribution"]
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if "viewer_certificate" in conf.keys():
15 # check if cloudfront_default_certificate is true then this could use less than tls 1.2
16 viewer_certificate = conf["viewer_certificate"][0]
17 if 'cloudfront_default_certificate' in viewer_certificate:
18 #is not using the default certificate
19 if viewer_certificate["cloudfront_default_certificate"] is not True:
20 #these protocol versions
21 if "minimum_protocol_version" in viewer_certificate:
22 protocol=viewer_certificate["minimum_protocol_version"][0]
23 if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:
24 return CheckResult.PASSED
25
26 #No cert specified so using default which can be less that tls 1.2
27 return CheckResult.FAILED
28
29 def get_inspected_key(self):
30
31 return "viewer_certificate/[0]/minimum_protocol_version"
32
33 def get_expected_values(self):
34 return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']
35
36
37 check = CloudFrontTLS12()
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py
--- a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py
+++ b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py
@@ -12,18 +12,18 @@
def scan_resource_conf(self, conf):
if "viewer_certificate" in conf.keys():
- # check if cloudfront_default_certificate is true then this could use less than tls 1.2
viewer_certificate = conf["viewer_certificate"][0]
- if 'cloudfront_default_certificate' in viewer_certificate:
- #is not using the default certificate
- if viewer_certificate["cloudfront_default_certificate"] is not True:
- #these protocol versions
- if "minimum_protocol_version" in viewer_certificate:
- protocol=viewer_certificate["minimum_protocol_version"][0]
- if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:
- return CheckResult.PASSED
-
- #No cert specified so using default which can be less that tls 1.2
+ # check if cloudfront_default_certificate is true then this could use less than tls 1.2
+ if ("cloudfront_default_certificate" in viewer_certificate and viewer_certificate
+ ["cloudfront_default_certificate"][0] is not True) or (
+ 'minimum_protocol_version' in viewer_certificate):
+ # is not using the default certificate
+ if 'minimum_protocol_version' in viewer_certificate:
+ protocol = viewer_certificate["minimum_protocol_version"][0]
+ # these protocol versions
+ if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:
+ return CheckResult.PASSED
+ # No cert specified so using default which can be less that tls 1.2
return CheckResult.FAILED
def get_inspected_key(self):
@@ -34,4 +34,4 @@
return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']
-check = CloudFrontTLS12()
\ No newline at end of file
+check = CloudFrontTLS12()
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\n--- a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\n+++ b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\n@@ -12,18 +12,18 @@\n \n def scan_resource_conf(self, conf):\n if \"viewer_certificate\" in conf.keys():\n- # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n viewer_certificate = conf[\"viewer_certificate\"][0]\n- if 'cloudfront_default_certificate' in viewer_certificate:\n- #is not using the default certificate\n- if viewer_certificate[\"cloudfront_default_certificate\"] is not True:\n- #these protocol versions\n- if \"minimum_protocol_version\" in viewer_certificate:\n- protocol=viewer_certificate[\"minimum_protocol_version\"][0]\n- if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n- return CheckResult.PASSED\n-\n- #No cert specified so using default which can be less that tls 1.2\n+ # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n+ if (\"cloudfront_default_certificate\" in viewer_certificate and viewer_certificate\n+ [\"cloudfront_default_certificate\"][0] is not True) or (\n+ 'minimum_protocol_version' in viewer_certificate):\n+ # is not using the default certificate\n+ if 'minimum_protocol_version' in viewer_certificate:\n+ protocol = viewer_certificate[\"minimum_protocol_version\"][0]\n+ # these protocol versions\n+ if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n+ return CheckResult.PASSED\n+ # No cert specified so using default which can be less that tls 1.2\n return CheckResult.FAILED\n \n def get_inspected_key(self):\n@@ -34,4 +34,4 @@\n return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']\n \n \n-check = CloudFrontTLS12()\n\\ No newline at end of file\n+check = CloudFrontTLS12()\n", "issue": "CKV_AWS_174 incorrect reporting\nCKV_AWS_174 is being triggered in our terraform code even though we have the viewer certificate set to use TLSv.1.2. Snippet of our code here:\r\n\r\nviewer_certificate {\r\n acm_certificate_arn = aws_acm_certificate.cert.arn\r\n ssl_support_method = \"sni-only\"\r\n minimum_protocol_version = \"TLSv1.2_2019\" \r\n}\r\n\r\n\r\nSteps to reproduce the behavior:\r\nRunning checkov on our terraform code\r\n\r\n**Expected behavior**\r\nThis check should be passed\r\n\r\n\r\n\r\n**Additional context**\r\nIt looks to me like the issue is in the code between lines 17 and 19. 
I dont think based on the terraform documentation and the if statements that it would ever pass if using an acm certificate\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass CloudFrontTLS12(BaseResourceValueCheck):\n def __init__(self):\n name = \"Verify CloudFront Distribution Viewer Certificate is using TLS v1.2\"\n id = \"CKV_AWS_174\"\n supported_resources = [\"aws_cloudfront_distribution\"]\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if \"viewer_certificate\" in conf.keys():\n # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n viewer_certificate = conf[\"viewer_certificate\"][0]\n if 'cloudfront_default_certificate' in viewer_certificate:\n #is not using the default certificate\n if viewer_certificate[\"cloudfront_default_certificate\"] is not True:\n #these protocol versions\n if \"minimum_protocol_version\" in viewer_certificate:\n protocol=viewer_certificate[\"minimum_protocol_version\"][0]\n if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n return CheckResult.PASSED\n\n #No cert specified so using default which can be less that tls 1.2\n return CheckResult.FAILED\n\n def get_inspected_key(self):\n\n return \"viewer_certificate/[0]/minimum_protocol_version\"\n\n def get_expected_values(self):\n return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']\n\n\ncheck = CloudFrontTLS12()", "path": "checkov/terraform/checks/resource/aws/CloudfrontTLS12.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass CloudFrontTLS12(BaseResourceValueCheck):\n def __init__(self):\n name = \"Verify CloudFront Distribution Viewer Certificate is using TLS v1.2\"\n id = \"CKV_AWS_174\"\n supported_resources = [\"aws_cloudfront_distribution\"]\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if \"viewer_certificate\" in conf.keys():\n viewer_certificate = conf[\"viewer_certificate\"][0]\n # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n if (\"cloudfront_default_certificate\" in viewer_certificate and viewer_certificate\n [\"cloudfront_default_certificate\"][0] is not True) or (\n 'minimum_protocol_version' in viewer_certificate):\n # is not using the default certificate\n if 'minimum_protocol_version' in viewer_certificate:\n protocol = viewer_certificate[\"minimum_protocol_version\"][0]\n # these protocol versions\n if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n return CheckResult.PASSED\n # No cert specified so using default which can be less that tls 1.2\n return CheckResult.FAILED\n\n def get_inspected_key(self):\n\n return \"viewer_certificate/[0]/minimum_protocol_version\"\n\n def get_expected_values(self):\n return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']\n\n\ncheck = CloudFrontTLS12()\n", "path": "checkov/terraform/checks/resource/aws/CloudfrontTLS12.py"}]}
| 930 | 551 |
gh_patches_debug_30894
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-1712
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG][mars on ray] Failed to run in ray cluster mode.
**Describe the bug**
In cluster mode, Ray loads actor classes from local code (rather than deserializing them from bytes), but Mars' `RemoteMetaStore` is nested inside the `RayStorage` class, so a Ray worker can't load `RemoteMetaStore` from local code.
```
2020-11-17 16:48:29,349 WARNING worker.py:1157 -- Traceback (most recent call last):
File "/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py", line 445, in _load_actor_class_from_local
actor_class = getattr(module, class_name)
AttributeError: module 'mars.ray.core' has no attribute 'RemoteMetaStore'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "python/ray/_raylet.pyx", line 563, in ray._raylet.task_execution_handler
File "python/ray/_raylet.pyx", line 567, in ray._raylet.task_execution_handler
File "python/ray/_raylet.pyx", line 364, in ray._raylet.execute_task
File "/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py", line 394, in load_actor_class
job_id, actor_creation_function_descriptor)
File "/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py", line 454, in _load_actor_class_from_local
class_name))
RuntimeError: Actor RemoteMetaStore failed to be imported from local code.
An unexpected internal error occurred while the worker was executing a task.
```
**To Reproduce**
To help us reproduce this bug, please provide the information below:
1. Python 3.6.5
2. pymars 0.6.0b2
3. ray 1.0.1
4. Minimal reproducible example.
``` python
from mars.session import new_session
ray_session = new_session(backend='ray', _load_code_from_local=True).as_default()
print(ray_session)
def main():
import mars.dataframe as md
import mars.tensor as mt
t = mt.random.rand(100, 4, chunk_size=30)
df = md.DataFrame(t, columns=list('abcd'))
print(df.describe().execute())
if __name__ == "__main__":
main()
```
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/ray/core.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import uuid
18 from collections import namedtuple
19 from functools import lru_cache
20 from typing import Dict
21
22 import ray
23
24 from ..graph import DAG
25 from ..operands import Fetch
26 from ..tiles import get_tiled
27 from ..utils import build_fetch_chunk
28 from ..executor import Executor, GraphExecution
29
30
31 class _OperandWrapper:
32 __slots__ = 'op', 'chunks'
33
34 def __init__(self, op, chunks):
35 """
36 As we only serde op for Ray executors, but op only weakly reference chunks,
37 So we create a wrapper here to keep the reference
38 """
39 self.op = op
40 self.chunks = chunks
41
42
43 def operand_serializer(op):
44 graph = DAG()
45 inputs = [build_fetch_chunk(inp) for inp in op.inputs or []]
46 new_op = op.copy()
47
48 kws = []
49 for c in op.outputs:
50 params = c.params.copy()
51 params['_key'] = c.key
52 params.update(c.extra_params)
53 kws.append(params)
54
55 chunks = new_op.new_chunks(inputs, kws=kws, output_limit=len(kws))
56 for obj in chunks + inputs:
57 graph.add_node(obj)
58
59 return graph.to_json()
60
61
62 def operand_deserializer(value):
63 graph = DAG.from_json(value)
64 if len(graph) == 1:
65 chunks = [list(graph)[0]]
66 else:
67 chunks = [c for c in graph if not isinstance(c.op, Fetch)]
68 op = chunks[0].op
69 return _OperandWrapper(op, chunks)
70
71
72 @lru_cache(500)
73 def _register_ray_serializer(op):
74 # register a custom serializer for Mars operand
75 try:
76 ray.register_custom_serializer(
77 type(op), serializer=operand_serializer,
78 deserializer=operand_deserializer)
79 except AttributeError: # ray >= 1.0
80 from ray.worker import global_worker
81
82 global_worker.check_connected()
83 context = global_worker.get_serialization_context()
84 context.register_custom_serializer(
85 type(op), serializer=operand_serializer,
86 deserializer=operand_deserializer)
87
88
89 class GraphExecutionForRay(GraphExecution):
90 def handle_op(self, *args, **kw):
91 return RayExecutor.handle(*args, **kw)
92
93
94 ChunkMeta = namedtuple('ChunkMeta', ['shape', 'object_id'])
95
96
97 class RayStorage:
98 """
99 `RayStorage` is a dict-like class. When executed in local, Mars executor will store chunk result in a
100 dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.
101 """
102
103 @ray.remote
104 class RemoteMetaStore:
105 def __init__(self):
106 self._store = dict()
107
108 def set_meta(self, chunk_key, meta):
109 self._store[chunk_key] = meta
110
111 def get_meta(self, chunk_key):
112 return self._store[chunk_key]
113
114 def get_shape(self, chunk_key):
115 return self._store[chunk_key].shape
116
117 def chunk_keys(self):
118 return list(self._store.keys())
119
120 def delete_keys(self, keys):
121 if not isinstance(keys, (list, tuple)):
122 keys = [keys]
123 for k in keys:
124 del self._store[k]
125
126 def __init__(self, meta_store=None):
127 self.meta_store = meta_store or RayStorage.RemoteMetaStore.remote()
128
129 def __getitem__(self, item):
130 meta: ChunkMeta = ray.get(self.meta_store.get_meta.remote(item))
131 return ray.get(meta.object_id)
132
133 def __setitem__(self, key, value):
134 object_id = ray.put(value)
135 shape = getattr(value, 'shape', None)
136 meta = ChunkMeta(shape=shape, object_id=object_id)
137 set_meta = self.meta_store.set_meta.remote(key, meta)
138 ray.wait([object_id, set_meta])
139
140 def copy(self):
141 return RayStorage(meta_store=self.meta_store)
142
143 def update(self, mapping: Dict):
144 tasks = []
145 for k, v in mapping.items():
146 object_id = ray.put(v)
147 tasks.append(object_id)
148 shape = getattr(v, 'shape', None)
149 meta = ChunkMeta(shape=shape, object_id=object_id)
150 set_meta = self.meta_store.set_meta.remote(k, meta)
151 tasks.append(set_meta)
152 ray.wait(tasks)
153
154 def __iter__(self):
155 return iter(ray.get(self.meta_store.chunk_keys.remote()))
156
157 def __delitem__(self, key):
158 ray.wait([self.meta_store.delete_keys.remote(key)])
159
160
161 @ray.remote
162 def execute_on_ray(func, results, op_wrapper: _OperandWrapper):
163 op = op_wrapper.op
164 func(results, op)
165
166
167 class RayExecutor(Executor):
168 """
169 Wraps the execute function as a Ray remote function, the type of `results` is `RayStorage`,
170 when operand is executed, it will fetch dependencies from a Ray actor.
171 """
172
173 _graph_execution_cls = GraphExecutionForRay
174
175 @classmethod
176 def handle(cls, op, results, mock=False):
177 method_name, mapper = ('execute', cls._op_runners) if not mock else \
178 ('estimate_size', cls._op_size_estimators)
179 try:
180 runner = mapper[type(op)]
181 except KeyError:
182 runner = getattr(op, method_name)
183
184 # register a custom serializer for Mars operand
185 _register_ray_serializer(op)
186
187 try:
188 ray.wait([execute_on_ray.remote(runner, results, op)])
189 except NotImplementedError:
190 for op_cls in mapper.keys():
191 if isinstance(op, op_cls):
192 mapper[type(op)] = mapper[op_cls]
193 runner = mapper[op_cls]
194
195 ray.wait(
196 [execute_on_ray.remote(runner, results, op)])
197 raise KeyError(f'No handler found for op: {op}')
198
199 @classmethod
200 def _get_chunk_shape(cls, chunk_key, chunk_result):
201 assert isinstance(chunk_result, RayStorage)
202 return ray.get(chunk_result.meta_store.get_shape.remote(chunk_key))
203
204
205 class RaySession:
206 """
207 Session to submit Mars job to Ray cluster.
208
209 If Ray is not initialized, kwargs will pass to initialize Ray.
210 """
211 def __init__(self, **kwargs):
212 # as we cannot serialize fuse chunk for now,
213 # we just disable numexpr for ray executor
214 engine = kwargs.pop('engine', ['numpy', 'dataframe'])
215 if not ray.is_initialized():
216 ray.init(**kwargs)
217 self._session_id = uuid.uuid4()
218 self._executor = RayExecutor(engine=engine,
219 storage=RayStorage())
220
221 @property
222 def session_id(self):
223 return self._session_id
224
225 @property
226 def executor(self):
227 return self._executor
228
229 def fetch(self, *tileables, **kw):
230 return self._executor.fetch_tileables(tileables, **kw)
231
232 def fetch_log(self, tileables, offsets=None, sizes=None): # pragma: no cover
233 raise NotImplementedError('`fetch_log` is not implemented for ray executor')
234
235 def run(self, *tileables, **kw):
236 """
237 Parallelism equals to Ray cluster CPUs.
238 """
239 if 'n_parallel' not in kw: # pragma: no cover
240 kw['n_parallel'] = ray.cluster_resources()['CPU']
241 return self._executor.execute_tileables(tileables, **kw)
242
243 def _update_tileable_shape(self, tileable):
244 from ..optimizes.tileable_graph import tileable_optimized
245
246 new_nsplits = self._executor.get_tileable_nsplits(tileable)
247 tiled = get_tiled(tileable, mapping=tileable_optimized)
248 for t in (tileable, tiled):
249 t._update_shape(tuple(sum(nsplit) for nsplit in new_nsplits))
250 tiled.nsplits = new_nsplits
251
252 def __enter__(self):
253 return self
254
255 def __exit__(self, *_):
256 self._executor = None
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mars/ray/core.py b/mars/ray/core.py
--- a/mars/ray/core.py
+++ b/mars/ray/core.py
@@ -94,37 +94,37 @@
ChunkMeta = namedtuple('ChunkMeta', ['shape', 'object_id'])
-class RayStorage:
- """
- `RayStorage` is a dict-like class. When executed in local, Mars executor will store chunk result in a
- dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.
- """
[email protected]
+class RemoteMetaStore:
+ def __init__(self):
+ self._store = dict()
- @ray.remote
- class RemoteMetaStore:
- def __init__(self):
- self._store = dict()
+ def set_meta(self, chunk_key, meta):
+ self._store[chunk_key] = meta
- def set_meta(self, chunk_key, meta):
- self._store[chunk_key] = meta
+ def get_meta(self, chunk_key):
+ return self._store[chunk_key]
- def get_meta(self, chunk_key):
- return self._store[chunk_key]
+ def get_shape(self, chunk_key):
+ return self._store[chunk_key].shape
- def get_shape(self, chunk_key):
- return self._store[chunk_key].shape
+ def chunk_keys(self):
+ return list(self._store.keys())
- def chunk_keys(self):
- return list(self._store.keys())
+ def delete_keys(self, keys):
+ if not isinstance(keys, (list, tuple)):
+ keys = [keys]
+ for k in keys:
+ del self._store[k]
- def delete_keys(self, keys):
- if not isinstance(keys, (list, tuple)):
- keys = [keys]
- for k in keys:
- del self._store[k]
+class RayStorage:
+ """
+ `RayStorage` is a dict-like class. When executed in local, Mars executor will store chunk result in a
+ dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.
+ """
def __init__(self, meta_store=None):
- self.meta_store = meta_store or RayStorage.RemoteMetaStore.remote()
+ self.meta_store = meta_store or RemoteMetaStore.remote()
def __getitem__(self, item):
meta: ChunkMeta = ray.get(self.meta_store.get_meta.remote(item))
|
{"golden_diff": "diff --git a/mars/ray/core.py b/mars/ray/core.py\n--- a/mars/ray/core.py\n+++ b/mars/ray/core.py\n@@ -94,37 +94,37 @@\n ChunkMeta = namedtuple('ChunkMeta', ['shape', 'object_id'])\n \n \n-class RayStorage:\n- \"\"\"\n- `RayStorage` is a dict-like class. When executed in local, Mars executor will store chunk result in a\n- dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.\n- \"\"\"\[email protected]\n+class RemoteMetaStore:\n+ def __init__(self):\n+ self._store = dict()\n \n- @ray.remote\n- class RemoteMetaStore:\n- def __init__(self):\n- self._store = dict()\n+ def set_meta(self, chunk_key, meta):\n+ self._store[chunk_key] = meta\n \n- def set_meta(self, chunk_key, meta):\n- self._store[chunk_key] = meta\n+ def get_meta(self, chunk_key):\n+ return self._store[chunk_key]\n \n- def get_meta(self, chunk_key):\n- return self._store[chunk_key]\n+ def get_shape(self, chunk_key):\n+ return self._store[chunk_key].shape\n \n- def get_shape(self, chunk_key):\n- return self._store[chunk_key].shape\n+ def chunk_keys(self):\n+ return list(self._store.keys())\n \n- def chunk_keys(self):\n- return list(self._store.keys())\n+ def delete_keys(self, keys):\n+ if not isinstance(keys, (list, tuple)):\n+ keys = [keys]\n+ for k in keys:\n+ del self._store[k]\n \n- def delete_keys(self, keys):\n- if not isinstance(keys, (list, tuple)):\n- keys = [keys]\n- for k in keys:\n- del self._store[k]\n \n+class RayStorage:\n+ \"\"\"\n+ `RayStorage` is a dict-like class. When executed in local, Mars executor will store chunk result in a\n+ dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.\n+ \"\"\"\n def __init__(self, meta_store=None):\n- self.meta_store = meta_store or RayStorage.RemoteMetaStore.remote()\n+ self.meta_store = meta_store or RemoteMetaStore.remote()\n \n def __getitem__(self, item):\n meta: ChunkMeta = ray.get(self.meta_store.get_meta.remote(item))\n", "issue": "[BUG][mars on ray] Failed to run in ray cluster mode.\n**Describe the bug**\r\nRay cluster will load code from local (not deserialize from bytes), but mars `RemoteMetaStore` is nested in `RayStorage` class. Ray worker can't load the `RemoteMetaStore` from local code.\r\n\r\n```\r\n2020-11-17 16:48:29,349\tWARNING worker.py:1157 -- Traceback (most recent call last):\r\n File \"/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py\", line 445, in _load_actor_class_from_local\r\n actor_class = getattr(module, class_name)\r\nAttributeError: module 'mars.ray.core' has no attribute 'RemoteMetaStore'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"python/ray/_raylet.pyx\", line 563, in ray._raylet.task_execution_handler\r\n File \"python/ray/_raylet.pyx\", line 567, in ray._raylet.task_execution_handler\r\n File \"python/ray/_raylet.pyx\", line 364, in ray._raylet.execute_task\r\n File \"/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py\", line 394, in load_actor_class\r\n job_id, actor_creation_function_descriptor)\r\n File \"/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py\", line 454, in _load_actor_class_from_local\r\n class_name))\r\nRuntimeError: Actor RemoteMetaStore failed to be imported from local code.\r\nAn unexpected internal error occurred while the worker was executing a task.\r\n```\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Python 3.6.5\r\n2. 
pymars 0.6.0b2\r\n3. ray 1.0.1\r\n4. Minimal reproduce example.\r\n``` python\r\nfrom mars.session import new_session\r\n\r\nray_session = new_session(backend='ray', _load_code_from_local=True).as_default()\r\nprint(ray_session)\r\n\r\n\r\ndef main():\r\n import mars.dataframe as md\r\n import mars.tensor as mt\r\n t = mt.random.rand(100, 4, chunk_size=30)\r\n df = md.DataFrame(t, columns=list('abcd'))\r\n print(df.describe().execute())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport uuid\nfrom collections import namedtuple\nfrom functools import lru_cache\nfrom typing import Dict\n\nimport ray\n\nfrom ..graph import DAG\nfrom ..operands import Fetch\nfrom ..tiles import get_tiled\nfrom ..utils import build_fetch_chunk\nfrom ..executor import Executor, GraphExecution\n\n\nclass _OperandWrapper:\n __slots__ = 'op', 'chunks'\n\n def __init__(self, op, chunks):\n \"\"\"\n As we only serde op for Ray executors, but op only weakly reference chunks,\n So we create a wrapper here to keep the reference\n \"\"\"\n self.op = op\n self.chunks = chunks\n\n\ndef operand_serializer(op):\n graph = DAG()\n inputs = [build_fetch_chunk(inp) for inp in op.inputs or []]\n new_op = op.copy()\n\n kws = []\n for c in op.outputs:\n params = c.params.copy()\n params['_key'] = c.key\n params.update(c.extra_params)\n kws.append(params)\n\n chunks = new_op.new_chunks(inputs, kws=kws, output_limit=len(kws))\n for obj in chunks + inputs:\n graph.add_node(obj)\n\n return graph.to_json()\n\n\ndef operand_deserializer(value):\n graph = DAG.from_json(value)\n if len(graph) == 1:\n chunks = [list(graph)[0]]\n else:\n chunks = [c for c in graph if not isinstance(c.op, Fetch)]\n op = chunks[0].op\n return _OperandWrapper(op, chunks)\n\n\n@lru_cache(500)\ndef _register_ray_serializer(op):\n # register a custom serializer for Mars operand\n try:\n ray.register_custom_serializer(\n type(op), serializer=operand_serializer,\n deserializer=operand_deserializer)\n except AttributeError: # ray >= 1.0\n from ray.worker import global_worker\n\n global_worker.check_connected()\n context = global_worker.get_serialization_context()\n context.register_custom_serializer(\n type(op), serializer=operand_serializer,\n deserializer=operand_deserializer)\n\n\nclass GraphExecutionForRay(GraphExecution):\n def handle_op(self, *args, **kw):\n return RayExecutor.handle(*args, **kw)\n\n\nChunkMeta = namedtuple('ChunkMeta', ['shape', 'object_id'])\n\n\nclass RayStorage:\n \"\"\"\n `RayStorage` is a dict-like class. 
When executed in local, Mars executor will store chunk result in a\n dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.\n \"\"\"\n\n @ray.remote\n class RemoteMetaStore:\n def __init__(self):\n self._store = dict()\n\n def set_meta(self, chunk_key, meta):\n self._store[chunk_key] = meta\n\n def get_meta(self, chunk_key):\n return self._store[chunk_key]\n\n def get_shape(self, chunk_key):\n return self._store[chunk_key].shape\n\n def chunk_keys(self):\n return list(self._store.keys())\n\n def delete_keys(self, keys):\n if not isinstance(keys, (list, tuple)):\n keys = [keys]\n for k in keys:\n del self._store[k]\n\n def __init__(self, meta_store=None):\n self.meta_store = meta_store or RayStorage.RemoteMetaStore.remote()\n\n def __getitem__(self, item):\n meta: ChunkMeta = ray.get(self.meta_store.get_meta.remote(item))\n return ray.get(meta.object_id)\n\n def __setitem__(self, key, value):\n object_id = ray.put(value)\n shape = getattr(value, 'shape', None)\n meta = ChunkMeta(shape=shape, object_id=object_id)\n set_meta = self.meta_store.set_meta.remote(key, meta)\n ray.wait([object_id, set_meta])\n\n def copy(self):\n return RayStorage(meta_store=self.meta_store)\n\n def update(self, mapping: Dict):\n tasks = []\n for k, v in mapping.items():\n object_id = ray.put(v)\n tasks.append(object_id)\n shape = getattr(v, 'shape', None)\n meta = ChunkMeta(shape=shape, object_id=object_id)\n set_meta = self.meta_store.set_meta.remote(k, meta)\n tasks.append(set_meta)\n ray.wait(tasks)\n\n def __iter__(self):\n return iter(ray.get(self.meta_store.chunk_keys.remote()))\n\n def __delitem__(self, key):\n ray.wait([self.meta_store.delete_keys.remote(key)])\n\n\[email protected]\ndef execute_on_ray(func, results, op_wrapper: _OperandWrapper):\n op = op_wrapper.op\n func(results, op)\n\n\nclass RayExecutor(Executor):\n \"\"\"\n Wraps the execute function as a Ray remote function, the type of `results` is `RayStorage`,\n when operand is executed, it will fetch dependencies from a Ray actor.\n \"\"\"\n\n _graph_execution_cls = GraphExecutionForRay\n\n @classmethod\n def handle(cls, op, results, mock=False):\n method_name, mapper = ('execute', cls._op_runners) if not mock else \\\n ('estimate_size', cls._op_size_estimators)\n try:\n runner = mapper[type(op)]\n except KeyError:\n runner = getattr(op, method_name)\n\n # register a custom serializer for Mars operand\n _register_ray_serializer(op)\n\n try:\n ray.wait([execute_on_ray.remote(runner, results, op)])\n except NotImplementedError:\n for op_cls in mapper.keys():\n if isinstance(op, op_cls):\n mapper[type(op)] = mapper[op_cls]\n runner = mapper[op_cls]\n\n ray.wait(\n [execute_on_ray.remote(runner, results, op)])\n raise KeyError(f'No handler found for op: {op}')\n\n @classmethod\n def _get_chunk_shape(cls, chunk_key, chunk_result):\n assert isinstance(chunk_result, RayStorage)\n return ray.get(chunk_result.meta_store.get_shape.remote(chunk_key))\n\n\nclass RaySession:\n \"\"\"\n Session to submit Mars job to Ray cluster.\n\n If Ray is not initialized, kwargs will pass to initialize Ray.\n \"\"\"\n def __init__(self, **kwargs):\n # as we cannot serialize fuse chunk for now,\n # we just disable numexpr for ray executor\n engine = kwargs.pop('engine', ['numpy', 'dataframe'])\n if not ray.is_initialized():\n ray.init(**kwargs)\n self._session_id = uuid.uuid4()\n self._executor = RayExecutor(engine=engine,\n storage=RayStorage())\n\n @property\n def session_id(self):\n return self._session_id\n\n @property\n def 
executor(self):\n return self._executor\n\n def fetch(self, *tileables, **kw):\n return self._executor.fetch_tileables(tileables, **kw)\n\n def fetch_log(self, tileables, offsets=None, sizes=None): # pragma: no cover\n raise NotImplementedError('`fetch_log` is not implemented for ray executor')\n\n def run(self, *tileables, **kw):\n \"\"\"\n Parallelism equals to Ray cluster CPUs.\n \"\"\"\n if 'n_parallel' not in kw: # pragma: no cover\n kw['n_parallel'] = ray.cluster_resources()['CPU']\n return self._executor.execute_tileables(tileables, **kw)\n\n def _update_tileable_shape(self, tileable):\n from ..optimizes.tileable_graph import tileable_optimized\n\n new_nsplits = self._executor.get_tileable_nsplits(tileable)\n tiled = get_tiled(tileable, mapping=tileable_optimized)\n for t in (tileable, tiled):\n t._update_shape(tuple(sum(nsplit) for nsplit in new_nsplits))\n tiled.nsplits = new_nsplits\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self._executor = None\n", "path": "mars/ray/core.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport uuid\nfrom collections import namedtuple\nfrom functools import lru_cache\nfrom typing import Dict\n\nimport ray\n\nfrom ..graph import DAG\nfrom ..operands import Fetch\nfrom ..tiles import get_tiled\nfrom ..utils import build_fetch_chunk\nfrom ..executor import Executor, GraphExecution\n\n\nclass _OperandWrapper:\n __slots__ = 'op', 'chunks'\n\n def __init__(self, op, chunks):\n \"\"\"\n As we only serde op for Ray executors, but op only weakly reference chunks,\n So we create a wrapper here to keep the reference\n \"\"\"\n self.op = op\n self.chunks = chunks\n\n\ndef operand_serializer(op):\n graph = DAG()\n inputs = [build_fetch_chunk(inp) for inp in op.inputs or []]\n new_op = op.copy()\n\n kws = []\n for c in op.outputs:\n params = c.params.copy()\n params['_key'] = c.key\n params.update(c.extra_params)\n kws.append(params)\n\n chunks = new_op.new_chunks(inputs, kws=kws, output_limit=len(kws))\n for obj in chunks + inputs:\n graph.add_node(obj)\n\n return graph.to_json()\n\n\ndef operand_deserializer(value):\n graph = DAG.from_json(value)\n if len(graph) == 1:\n chunks = [list(graph)[0]]\n else:\n chunks = [c for c in graph if not isinstance(c.op, Fetch)]\n op = chunks[0].op\n return _OperandWrapper(op, chunks)\n\n\n@lru_cache(500)\ndef _register_ray_serializer(op):\n # register a custom serializer for Mars operand\n try:\n ray.register_custom_serializer(\n type(op), serializer=operand_serializer,\n deserializer=operand_deserializer)\n except AttributeError: # ray >= 1.0\n from ray.worker import global_worker\n\n global_worker.check_connected()\n context = global_worker.get_serialization_context()\n context.register_custom_serializer(\n type(op), serializer=operand_serializer,\n deserializer=operand_deserializer)\n\n\nclass GraphExecutionForRay(GraphExecution):\n def handle_op(self, *args, 
**kw):\n return RayExecutor.handle(*args, **kw)\n\n\nChunkMeta = namedtuple('ChunkMeta', ['shape', 'object_id'])\n\n\[email protected]\nclass RemoteMetaStore:\n def __init__(self):\n self._store = dict()\n\n def set_meta(self, chunk_key, meta):\n self._store[chunk_key] = meta\n\n def get_meta(self, chunk_key):\n return self._store[chunk_key]\n\n def get_shape(self, chunk_key):\n return self._store[chunk_key].shape\n\n def chunk_keys(self):\n return list(self._store.keys())\n\n def delete_keys(self, keys):\n if not isinstance(keys, (list, tuple)):\n keys = [keys]\n for k in keys:\n del self._store[k]\n\n\nclass RayStorage:\n \"\"\"\n `RayStorage` is a dict-like class. When executed in local, Mars executor will store chunk result in a\n dict(chunk_key -> chunk_result), here uses Ray actor to store them as remote objects.\n \"\"\"\n def __init__(self, meta_store=None):\n self.meta_store = meta_store or RemoteMetaStore.remote()\n\n def __getitem__(self, item):\n meta: ChunkMeta = ray.get(self.meta_store.get_meta.remote(item))\n return ray.get(meta.object_id)\n\n def __setitem__(self, key, value):\n object_id = ray.put(value)\n shape = getattr(value, 'shape', None)\n meta = ChunkMeta(shape=shape, object_id=object_id)\n set_meta = self.meta_store.set_meta.remote(key, meta)\n ray.wait([object_id, set_meta])\n\n def copy(self):\n return RayStorage(meta_store=self.meta_store)\n\n def update(self, mapping: Dict):\n tasks = []\n for k, v in mapping.items():\n object_id = ray.put(v)\n tasks.append(object_id)\n shape = getattr(v, 'shape', None)\n meta = ChunkMeta(shape=shape, object_id=object_id)\n set_meta = self.meta_store.set_meta.remote(k, meta)\n tasks.append(set_meta)\n ray.wait(tasks)\n\n def __iter__(self):\n return iter(ray.get(self.meta_store.chunk_keys.remote()))\n\n def __delitem__(self, key):\n ray.wait([self.meta_store.delete_keys.remote(key)])\n\n\[email protected]\ndef execute_on_ray(func, results, op_wrapper: _OperandWrapper):\n op = op_wrapper.op\n func(results, op)\n\n\nclass RayExecutor(Executor):\n \"\"\"\n Wraps the execute function as a Ray remote function, the type of `results` is `RayStorage`,\n when operand is executed, it will fetch dependencies from a Ray actor.\n \"\"\"\n\n _graph_execution_cls = GraphExecutionForRay\n\n @classmethod\n def handle(cls, op, results, mock=False):\n method_name, mapper = ('execute', cls._op_runners) if not mock else \\\n ('estimate_size', cls._op_size_estimators)\n try:\n runner = mapper[type(op)]\n except KeyError:\n runner = getattr(op, method_name)\n\n # register a custom serializer for Mars operand\n _register_ray_serializer(op)\n\n try:\n ray.wait([execute_on_ray.remote(runner, results, op)])\n except NotImplementedError:\n for op_cls in mapper.keys():\n if isinstance(op, op_cls):\n mapper[type(op)] = mapper[op_cls]\n runner = mapper[op_cls]\n\n ray.wait(\n [execute_on_ray.remote(runner, results, op)])\n raise KeyError(f'No handler found for op: {op}')\n\n @classmethod\n def _get_chunk_shape(cls, chunk_key, chunk_result):\n assert isinstance(chunk_result, RayStorage)\n return ray.get(chunk_result.meta_store.get_shape.remote(chunk_key))\n\n\nclass RaySession:\n \"\"\"\n Session to submit Mars job to Ray cluster.\n\n If Ray is not initialized, kwargs will pass to initialize Ray.\n \"\"\"\n def __init__(self, **kwargs):\n # as we cannot serialize fuse chunk for now,\n # we just disable numexpr for ray executor\n engine = kwargs.pop('engine', ['numpy', 'dataframe'])\n if not ray.is_initialized():\n ray.init(**kwargs)\n self._session_id = 
uuid.uuid4()\n self._executor = RayExecutor(engine=engine,\n storage=RayStorage())\n\n @property\n def session_id(self):\n return self._session_id\n\n @property\n def executor(self):\n return self._executor\n\n def fetch(self, *tileables, **kw):\n return self._executor.fetch_tileables(tileables, **kw)\n\n def fetch_log(self, tileables, offsets=None, sizes=None): # pragma: no cover\n raise NotImplementedError('`fetch_log` is not implemented for ray executor')\n\n def run(self, *tileables, **kw):\n \"\"\"\n Parallelism equals to Ray cluster CPUs.\n \"\"\"\n if 'n_parallel' not in kw: # pragma: no cover\n kw['n_parallel'] = ray.cluster_resources()['CPU']\n return self._executor.execute_tileables(tileables, **kw)\n\n def _update_tileable_shape(self, tileable):\n from ..optimizes.tileable_graph import tileable_optimized\n\n new_nsplits = self._executor.get_tileable_nsplits(tileable)\n tiled = get_tiled(tileable, mapping=tileable_optimized)\n for t in (tileable, tiled):\n t._update_shape(tuple(sum(nsplit) for nsplit in new_nsplits))\n tiled.nsplits = new_nsplits\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self._executor = None\n", "path": "mars/ray/core.py"}]}
| 3,350 | 557 |
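The patch in the row above hinges on moving the metadata store out of `RayStorage` and into a module-level Ray actor. A minimal, self-contained sketch of that actor pattern in plain Ray (no Mars imports; the `MetaStore` name and the sample key/metadata are illustrative only):

```python
import ray


@ray.remote
class MetaStore:
    """Key-value actor holding chunk metadata, defined at module level."""

    def __init__(self):
        self._store = {}

    def set_meta(self, key, meta):
        self._store[key] = meta

    def get_meta(self, key):
        return self._store[key]


if __name__ == "__main__":
    ray.init()
    store = MetaStore.remote()                        # create the actor
    ray.get(store.set_meta.remote("chunk-0", {"shape": (100, 4)}))
    print(ray.get(store.get_meta.remote("chunk-0")))  # {'shape': (100, 4)}
    ray.shutdown()
```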
gh_patches_debug_30182
|
rasdani/github-patches
|
git_diff
|
pytorch__TensorRT-2519
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for `aten.slice_scatter`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/torch_tensorrt/dynamo/lowering/_decompositions.py`
Content:
```
1 import logging
2 from typing import Any, Callable, Dict, List, Optional
3
4 import torch
5 from torch._decomp import register_decomposition
6 from torch._ops import OpOverload
7
8 from ._decomposition_groups import (
9 ENABLED_TORCH_DECOMPOSITIONS,
10 TORCH_TRT_DECOMPOSITIONS,
11 _core_aten_decompositions,
12 aten,
13 torch_disabled_decompositions,
14 torch_enabled_decompositions,
15 )
16
17 logger = logging.getLogger(__name__)
18
19
20 def register_torch_trt_decomposition(
21 aten_op: OpOverload, registry: Optional[Any] = None
22 ) -> Callable[[Any], Any]:
23 """Checks if the decomposition already exists in one of the sets
24 Registers the decomposition via the Torch utility
25
26 Alerts the user if the decomposition already exists, before registering
27 Throws an AssertionError if the user attempts to register a decomposition
28 which is present in the set of explicitly disabled decompositions
29 """
30 if aten_op in torch_enabled_decompositions:
31 logger.warning(
32 f"Detected custom decomposition for {aten_op}, which conflicts "
33 "with an existing Torch decomposition in torch_enabled_decompositions. "
34 "The custom implementation will take precedence."
35 )
36 elif aten_op in torch_disabled_decompositions:
37 logger.info(
38 f"Detected custom decomposition for {aten_op}, which is present "
39 "in torch_disabled_decompositions."
40 )
41
42 # Conflicts with _core_aten_decompositions will only occur if
43 # enable_experimental_decompositions is True in get_decompositions
44 if aten_op in _core_aten_decompositions:
45 logger.debug(
46 f"Detected custom decomposition for {aten_op}, which conflicts "
47 "with an existing Torch decomposition in core_aten_decompositions. "
48 "The custom implementation will take precedence."
49 )
50
51 def register(fn: Callable[[Any], Any]) -> Any:
52 return register_decomposition(aten_op=aten_op, registry=registry)(fn)
53
54 return register
55
56
57 def replace_inplace_op(aten_op: OpOverload, outplace_op: OpOverload) -> Any:
58 """Replace inplace operation with functional equivalent
59 Adapted from:
60 https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361
61 """
62
63 @register_torch_trt_decomposition(aten_op, registry=TORCH_TRT_DECOMPOSITIONS)
64 def inplace_op(*args, **kwargs): # type: ignore
65 out = outplace_op(*args, **kwargs)
66 return args[0].copy_(out)
67
68 return inplace_op
69
70
71 replace_inplace_op(aten.add_, aten.add)
72 replace_inplace_op(aten.addbmm_, aten.addbmm)
73 replace_inplace_op(aten.addmm_, aten.addmm)
74 replace_inplace_op(aten.addmv_, aten.addmv)
75 replace_inplace_op(aten.baddbmm_, aten.baddbmm)
76 replace_inplace_op(aten.cumprod_, aten.cumprod)
77 replace_inplace_op(aten.index_put_, aten.index_put)
78 replace_inplace_op(aten.index_reduce_, aten.index_reduce)
79 replace_inplace_op(aten.relu_, aten.relu)
80 replace_inplace_op(aten.round_, aten.round)
81 replace_inplace_op(aten.scatter_, aten.scatter)
82 replace_inplace_op(aten.scatter_add_, aten.scatter_add)
83 replace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)
84
85
86 @register_torch_trt_decomposition(aten.rsqrt, registry=TORCH_TRT_DECOMPOSITIONS)
87 def rsqrt_replacement(*args, **kwargs) -> torch.Tensor: # type: ignore
88 return torch.reciprocal(torch.sqrt(*args, **kwargs))
89
90
91 @register_torch_trt_decomposition(aten._unsafe_view, registry=TORCH_TRT_DECOMPOSITIONS)
92 def unsafe_view_replacement(x: torch.Tensor, *args, **kwargs) -> torch.Tensor: # type: ignore
93 return torch.reshape(x, *args, **kwargs)
94
95
96 @register_torch_trt_decomposition(
97 torch.ops.aten.lift_fresh_copy, registry=TORCH_TRT_DECOMPOSITIONS
98 )
99 def lift_fresh_copy_replacement(x: torch.Tensor) -> torch.Tensor:
100 return x
101
102
103 @register_torch_trt_decomposition(aten.alias, registry=TORCH_TRT_DECOMPOSITIONS)
104 def alias_replacement(x: torch.Tensor) -> torch.Tensor:
105 return x
106
107
108 @register_torch_trt_decomposition(
109 torch.ops.aten.reciprocal.default, registry=TORCH_TRT_DECOMPOSITIONS
110 )
111 def reciprocal_replacement(
112 input_: torch.Tensor,
113 ) -> torch.Tensor:
114 return torch.div(1, input_)
115
116
117 @register_torch_trt_decomposition(
118 torch.ops.prims.var.default, registry=TORCH_TRT_DECOMPOSITIONS
119 )
120 def var_decomposition(
121 input_tensor: torch.Tensor,
122 dims: Optional[List[int]],
123 correction: int,
124 output_dtype: Optional[torch.dtype] = None,
125 ) -> torch.Tensor:
126 if dims is None:
127 dims = []
128
129 # If the dimensions are empty, variance is taken over all dimensions
130 if isinstance(dims, (tuple, list)) and len(dims) == 0:
131 N = input_tensor.numel()
132 # Otherwise, the number of samples is the product of the dimensions reduced over
133 else:
134 N = 1
135 for dim_i in dims:
136 N *= input_tensor.shape[dim_i]
137
138 # Compute the mean, difference, and correction term as per the formula:
139 # https://pytorch.org/docs/stable/generated/torch.var.html
140
141 # Additionally, prims does not support keepdim, and so we only keep dimensions
142 # on the first reduction, then remove it for the second
143 sample_mean = torch.mean(input_tensor, dims, keepdim=True)
144 diff = input_tensor - sample_mean
145 squared_diff = diff * diff
146 variance_unnormalized = torch.sum(squared_diff, dims, keepdim=False)
147
148 if correction is None:
149 correction_term = float(N - 1)
150 elif isinstance(correction, int):
151 correction_term = float(N - correction)
152 elif isinstance(correction, float):
153 correction_term = float(N) - correction
154 else:
155 raise RuntimeError("correction must be int or float")
156
157 if correction_term <= 0:
158 raise RuntimeError(f"correction term was non-positive, got: {correction_term}")
159
160 variance = variance_unnormalized / correction_term
161
162 return variance
163
164
165 @register_torch_trt_decomposition(
166 torch.ops.aten.empty_permuted.default, registry=TORCH_TRT_DECOMPOSITIONS
167 )
168 def empty_permuted_decomposition(*args, **kwargs) -> torch.Tensor:
169 empty_size = args[0]
170 empty_permute = args[1]
171 perm = [0] * len(empty_size)
172 for permute_index, permute_element in enumerate(empty_permute):
173 perm[permute_element] = permute_index
174 return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)
175
176
177 def get_decompositions(
178 enable_experimental_decompositions: bool = False,
179 ) -> Dict[OpOverload, Callable[[Any], Any]]:
180 if enable_experimental_decompositions:
181 CORE_ATEN_DECOMPOSITIONS_FILTERED: Dict[OpOverload, Callable[[Any], Any]] = {
182 decomp: _core_aten_decompositions[decomp]
183 for decomp in _core_aten_decompositions
184 if decomp not in torch_disabled_decompositions
185 }
186 return {**CORE_ATEN_DECOMPOSITIONS_FILTERED, **TORCH_TRT_DECOMPOSITIONS}
187 else:
188 return {**ENABLED_TORCH_DECOMPOSITIONS, **TORCH_TRT_DECOMPOSITIONS}
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/py/torch_tensorrt/dynamo/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/lowering/_decompositions.py
--- a/py/torch_tensorrt/dynamo/lowering/_decompositions.py
+++ b/py/torch_tensorrt/dynamo/lowering/_decompositions.py
@@ -4,6 +4,7 @@
import torch
from torch._decomp import register_decomposition
from torch._ops import OpOverload
+from torch_tensorrt.dynamo.conversion.converter_utils import get_positive_dim
from ._decomposition_groups import (
ENABLED_TORCH_DECOMPOSITIONS,
@@ -174,6 +175,44 @@
return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)
+@register_torch_trt_decomposition(
+ torch.ops.aten.slice_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS
+)
+def slice_scatter_decomposition(
+ input_tensor: torch.Tensor,
+ src_tensor: torch.Tensor,
+ dim: int,
+ start: Optional[int] = None,
+ end: Optional[int] = None,
+ step: Optional[int] = None,
+):
+ dim_size = input_tensor.shape[dim]
+ start = get_positive_dim(start, input_tensor.shape[dim])
+ if end is None:
+ end = dim_size
+ end = get_positive_dim(end, input_tensor.shape[dim])
+ if step is None:
+ step = 1
+
+ src_dim = src_tensor.shape
+ # step == 0 is not a valid torch case
+ # also src_dim should be equal to slice dimension
+
+ if start == 0 and end == dim_size and step == 1:
+ return src_tensor
+
+ cat_tensors = []
+ index_tensor_shape = []
+ for i, src_each_dim in enumerate(list(src_dim)):
+ if i != dim:
+ index_tensor_shape.append(src_each_dim)
+ for index in range(start, end, step):
+ cat_tensors.append(index * torch.ones(index_tensor_shape, dtype=torch.long))
+ index_tensor = torch.stack(cat_tensors, dim).cuda()
+ output_tensor = torch.scatter(input_tensor, dim, index_tensor, src_tensor)
+ return output_tensor
+
+
def get_decompositions(
enable_experimental_decompositions: bool = False,
) -> Dict[OpOverload, Callable[[Any], Any]]:
|
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/lowering/_decompositions.py\n--- a/py/torch_tensorrt/dynamo/lowering/_decompositions.py\n+++ b/py/torch_tensorrt/dynamo/lowering/_decompositions.py\n@@ -4,6 +4,7 @@\n import torch\n from torch._decomp import register_decomposition\n from torch._ops import OpOverload\n+from torch_tensorrt.dynamo.conversion.converter_utils import get_positive_dim\n \n from ._decomposition_groups import (\n ENABLED_TORCH_DECOMPOSITIONS,\n@@ -174,6 +175,44 @@\n return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)\n \n \n+@register_torch_trt_decomposition(\n+ torch.ops.aten.slice_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS\n+)\n+def slice_scatter_decomposition(\n+ input_tensor: torch.Tensor,\n+ src_tensor: torch.Tensor,\n+ dim: int,\n+ start: Optional[int] = None,\n+ end: Optional[int] = None,\n+ step: Optional[int] = None,\n+):\n+ dim_size = input_tensor.shape[dim]\n+ start = get_positive_dim(start, input_tensor.shape[dim])\n+ if end is None:\n+ end = dim_size\n+ end = get_positive_dim(end, input_tensor.shape[dim])\n+ if step is None:\n+ step = 1\n+\n+ src_dim = src_tensor.shape\n+ # step == 0 is not a valid torch case\n+ # also src_dim should be equal to slice dimension\n+\n+ if start == 0 and end == dim_size and step == 1:\n+ return src_tensor\n+\n+ cat_tensors = []\n+ index_tensor_shape = []\n+ for i, src_each_dim in enumerate(list(src_dim)):\n+ if i != dim:\n+ index_tensor_shape.append(src_each_dim)\n+ for index in range(start, end, step):\n+ cat_tensors.append(index * torch.ones(index_tensor_shape, dtype=torch.long))\n+ index_tensor = torch.stack(cat_tensors, dim).cuda()\n+ output_tensor = torch.scatter(input_tensor, dim, index_tensor, src_tensor)\n+ return output_tensor\n+\n+\n def get_decompositions(\n enable_experimental_decompositions: bool = False,\n ) -> Dict[OpOverload, Callable[[Any], Any]]:\n", "issue": "Add support for `aten.slice_scatter`\n\n", "before_files": [{"content": "import logging\nfrom typing import Any, Callable, Dict, List, Optional\n\nimport torch\nfrom torch._decomp import register_decomposition\nfrom torch._ops import OpOverload\n\nfrom ._decomposition_groups import (\n ENABLED_TORCH_DECOMPOSITIONS,\n TORCH_TRT_DECOMPOSITIONS,\n _core_aten_decompositions,\n aten,\n torch_disabled_decompositions,\n torch_enabled_decompositions,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_torch_trt_decomposition(\n aten_op: OpOverload, registry: Optional[Any] = None\n) -> Callable[[Any], Any]:\n \"\"\"Checks if the decomposition already exists in one of the sets\n Registers the decomposition via the Torch utility\n\n Alerts the user if the decomposition already exists, before registering\n Throws an AssertionError if the user attempts to register a decomposition\n which is present in the set of explicitly disabled decompositions\n \"\"\"\n if aten_op in torch_enabled_decompositions:\n logger.warning(\n f\"Detected custom decomposition for {aten_op}, which conflicts \"\n \"with an existing Torch decomposition in torch_enabled_decompositions. 
\"\n \"The custom implementation will take precedence.\"\n )\n elif aten_op in torch_disabled_decompositions:\n logger.info(\n f\"Detected custom decomposition for {aten_op}, which is present \"\n \"in torch_disabled_decompositions.\"\n )\n\n # Conflicts with _core_aten_decompositions will only occur if\n # enable_experimental_decompositions is True in get_decompositions\n if aten_op in _core_aten_decompositions:\n logger.debug(\n f\"Detected custom decomposition for {aten_op}, which conflicts \"\n \"with an existing Torch decomposition in core_aten_decompositions. \"\n \"The custom implementation will take precedence.\"\n )\n\n def register(fn: Callable[[Any], Any]) -> Any:\n return register_decomposition(aten_op=aten_op, registry=registry)(fn)\n\n return register\n\n\ndef replace_inplace_op(aten_op: OpOverload, outplace_op: OpOverload) -> Any:\n \"\"\"Replace inplace operation with functional equivalent\n Adapted from:\n https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361\n \"\"\"\n\n @register_torch_trt_decomposition(aten_op, registry=TORCH_TRT_DECOMPOSITIONS)\n def inplace_op(*args, **kwargs): # type: ignore\n out = outplace_op(*args, **kwargs)\n return args[0].copy_(out)\n\n return inplace_op\n\n\nreplace_inplace_op(aten.add_, aten.add)\nreplace_inplace_op(aten.addbmm_, aten.addbmm)\nreplace_inplace_op(aten.addmm_, aten.addmm)\nreplace_inplace_op(aten.addmv_, aten.addmv)\nreplace_inplace_op(aten.baddbmm_, aten.baddbmm)\nreplace_inplace_op(aten.cumprod_, aten.cumprod)\nreplace_inplace_op(aten.index_put_, aten.index_put)\nreplace_inplace_op(aten.index_reduce_, aten.index_reduce)\nreplace_inplace_op(aten.relu_, aten.relu)\nreplace_inplace_op(aten.round_, aten.round)\nreplace_inplace_op(aten.scatter_, aten.scatter)\nreplace_inplace_op(aten.scatter_add_, aten.scatter_add)\nreplace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)\n\n\n@register_torch_trt_decomposition(aten.rsqrt, registry=TORCH_TRT_DECOMPOSITIONS)\ndef rsqrt_replacement(*args, **kwargs) -> torch.Tensor: # type: ignore\n return torch.reciprocal(torch.sqrt(*args, **kwargs))\n\n\n@register_torch_trt_decomposition(aten._unsafe_view, registry=TORCH_TRT_DECOMPOSITIONS)\ndef unsafe_view_replacement(x: torch.Tensor, *args, **kwargs) -> torch.Tensor: # type: ignore\n return torch.reshape(x, *args, **kwargs)\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.lift_fresh_copy, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef lift_fresh_copy_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\n@register_torch_trt_decomposition(aten.alias, registry=TORCH_TRT_DECOMPOSITIONS)\ndef alias_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.reciprocal.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef reciprocal_replacement(\n input_: torch.Tensor,\n) -> torch.Tensor:\n return torch.div(1, input_)\n\n\n@register_torch_trt_decomposition(\n torch.ops.prims.var.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef var_decomposition(\n input_tensor: torch.Tensor,\n dims: Optional[List[int]],\n correction: int,\n output_dtype: Optional[torch.dtype] = None,\n) -> torch.Tensor:\n if dims is None:\n dims = []\n\n # If the dimensions are empty, variance is taken over all dimensions\n if isinstance(dims, (tuple, list)) and len(dims) == 0:\n N = input_tensor.numel()\n # Otherwise, the number of samples is the product of the dimensions reduced over\n else:\n N = 1\n for dim_i in dims:\n N *= 
input_tensor.shape[dim_i]\n\n # Compute the mean, difference, and correction term as per the formula:\n # https://pytorch.org/docs/stable/generated/torch.var.html\n\n # Additionally, prims does not support keepdim, and so we only keep dimensions\n # on the first reduction, then remove it for the second\n sample_mean = torch.mean(input_tensor, dims, keepdim=True)\n diff = input_tensor - sample_mean\n squared_diff = diff * diff\n variance_unnormalized = torch.sum(squared_diff, dims, keepdim=False)\n\n if correction is None:\n correction_term = float(N - 1)\n elif isinstance(correction, int):\n correction_term = float(N - correction)\n elif isinstance(correction, float):\n correction_term = float(N) - correction\n else:\n raise RuntimeError(\"correction must be int or float\")\n\n if correction_term <= 0:\n raise RuntimeError(f\"correction term was non-positive, got: {correction_term}\")\n\n variance = variance_unnormalized / correction_term\n\n return variance\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.empty_permuted.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef empty_permuted_decomposition(*args, **kwargs) -> torch.Tensor:\n empty_size = args[0]\n empty_permute = args[1]\n perm = [0] * len(empty_size)\n for permute_index, permute_element in enumerate(empty_permute):\n perm[permute_element] = permute_index\n return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)\n\n\ndef get_decompositions(\n enable_experimental_decompositions: bool = False,\n) -> Dict[OpOverload, Callable[[Any], Any]]:\n if enable_experimental_decompositions:\n CORE_ATEN_DECOMPOSITIONS_FILTERED: Dict[OpOverload, Callable[[Any], Any]] = {\n decomp: _core_aten_decompositions[decomp]\n for decomp in _core_aten_decompositions\n if decomp not in torch_disabled_decompositions\n }\n return {**CORE_ATEN_DECOMPOSITIONS_FILTERED, **TORCH_TRT_DECOMPOSITIONS}\n else:\n return {**ENABLED_TORCH_DECOMPOSITIONS, **TORCH_TRT_DECOMPOSITIONS}\n", "path": "py/torch_tensorrt/dynamo/lowering/_decompositions.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Callable, Dict, List, Optional\n\nimport torch\nfrom torch._decomp import register_decomposition\nfrom torch._ops import OpOverload\nfrom torch_tensorrt.dynamo.conversion.converter_utils import get_positive_dim\n\nfrom ._decomposition_groups import (\n ENABLED_TORCH_DECOMPOSITIONS,\n TORCH_TRT_DECOMPOSITIONS,\n _core_aten_decompositions,\n aten,\n torch_disabled_decompositions,\n torch_enabled_decompositions,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_torch_trt_decomposition(\n aten_op: OpOverload, registry: Optional[Any] = None\n) -> Callable[[Any], Any]:\n \"\"\"Checks if the decomposition already exists in one of the sets\n Registers the decomposition via the Torch utility\n\n Alerts the user if the decomposition already exists, before registering\n Throws an AssertionError if the user attempts to register a decomposition\n which is present in the set of explicitly disabled decompositions\n \"\"\"\n if aten_op in torch_enabled_decompositions:\n logger.warning(\n f\"Detected custom decomposition for {aten_op}, which conflicts \"\n \"with an existing Torch decomposition in torch_enabled_decompositions. 
\"\n \"The custom implementation will take precedence.\"\n )\n elif aten_op in torch_disabled_decompositions:\n logger.info(\n f\"Detected custom decomposition for {aten_op}, which is present \"\n \"in torch_disabled_decompositions.\"\n )\n\n # Conflicts with _core_aten_decompositions will only occur if\n # enable_experimental_decompositions is True in get_decompositions\n if aten_op in _core_aten_decompositions:\n logger.debug(\n f\"Detected custom decomposition for {aten_op}, which conflicts \"\n \"with an existing Torch decomposition in core_aten_decompositions. \"\n \"The custom implementation will take precedence.\"\n )\n\n def register(fn: Callable[[Any], Any]) -> Any:\n return register_decomposition(aten_op=aten_op, registry=registry)(fn)\n\n return register\n\n\ndef replace_inplace_op(aten_op: OpOverload, outplace_op: OpOverload) -> Any:\n \"\"\"Replace inplace operation with functional equivalent\n Adapted from:\n https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361\n \"\"\"\n\n @register_torch_trt_decomposition(aten_op, registry=TORCH_TRT_DECOMPOSITIONS)\n def inplace_op(*args, **kwargs): # type: ignore\n out = outplace_op(*args, **kwargs)\n return args[0].copy_(out)\n\n return inplace_op\n\n\nreplace_inplace_op(aten.add_, aten.add)\nreplace_inplace_op(aten.addbmm_, aten.addbmm)\nreplace_inplace_op(aten.addmm_, aten.addmm)\nreplace_inplace_op(aten.addmv_, aten.addmv)\nreplace_inplace_op(aten.baddbmm_, aten.baddbmm)\nreplace_inplace_op(aten.cumprod_, aten.cumprod)\nreplace_inplace_op(aten.index_put_, aten.index_put)\nreplace_inplace_op(aten.index_reduce_, aten.index_reduce)\nreplace_inplace_op(aten.relu_, aten.relu)\nreplace_inplace_op(aten.round_, aten.round)\nreplace_inplace_op(aten.scatter_, aten.scatter)\nreplace_inplace_op(aten.scatter_add_, aten.scatter_add)\nreplace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)\n\n\n@register_torch_trt_decomposition(aten.rsqrt, registry=TORCH_TRT_DECOMPOSITIONS)\ndef rsqrt_replacement(*args, **kwargs) -> torch.Tensor: # type: ignore\n return torch.reciprocal(torch.sqrt(*args, **kwargs))\n\n\n@register_torch_trt_decomposition(aten._unsafe_view, registry=TORCH_TRT_DECOMPOSITIONS)\ndef unsafe_view_replacement(x: torch.Tensor, *args, **kwargs) -> torch.Tensor: # type: ignore\n return torch.reshape(x, *args, **kwargs)\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.lift_fresh_copy, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef lift_fresh_copy_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\n@register_torch_trt_decomposition(aten.alias, registry=TORCH_TRT_DECOMPOSITIONS)\ndef alias_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.reciprocal.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef reciprocal_replacement(\n input_: torch.Tensor,\n) -> torch.Tensor:\n return torch.div(1, input_)\n\n\n@register_torch_trt_decomposition(\n torch.ops.prims.var.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef var_decomposition(\n input_tensor: torch.Tensor,\n dims: Optional[List[int]],\n correction: int,\n output_dtype: Optional[torch.dtype] = None,\n) -> torch.Tensor:\n if dims is None:\n dims = []\n\n # If the dimensions are empty, variance is taken over all dimensions\n if isinstance(dims, (tuple, list)) and len(dims) == 0:\n N = input_tensor.numel()\n # Otherwise, the number of samples is the product of the dimensions reduced over\n else:\n N = 1\n for dim_i in dims:\n N *= 
input_tensor.shape[dim_i]\n\n # Compute the mean, difference, and correction term as per the formula:\n # https://pytorch.org/docs/stable/generated/torch.var.html\n\n # Additionally, prims does not support keepdim, and so we only keep dimensions\n # on the first reduction, then remove it for the second\n sample_mean = torch.mean(input_tensor, dims, keepdim=True)\n diff = input_tensor - sample_mean\n squared_diff = diff * diff\n variance_unnormalized = torch.sum(squared_diff, dims, keepdim=False)\n\n if correction is None:\n correction_term = float(N - 1)\n elif isinstance(correction, int):\n correction_term = float(N - correction)\n elif isinstance(correction, float):\n correction_term = float(N) - correction\n else:\n raise RuntimeError(\"correction must be int or float\")\n\n if correction_term <= 0:\n raise RuntimeError(f\"correction term was non-positive, got: {correction_term}\")\n\n variance = variance_unnormalized / correction_term\n\n return variance\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.empty_permuted.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef empty_permuted_decomposition(*args, **kwargs) -> torch.Tensor:\n empty_size = args[0]\n empty_permute = args[1]\n perm = [0] * len(empty_size)\n for permute_index, permute_element in enumerate(empty_permute):\n perm[permute_element] = permute_index\n return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.slice_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef slice_scatter_decomposition(\n input_tensor: torch.Tensor,\n src_tensor: torch.Tensor,\n dim: int,\n start: Optional[int] = None,\n end: Optional[int] = None,\n step: Optional[int] = None,\n):\n dim_size = input_tensor.shape[dim]\n start = get_positive_dim(start, input_tensor.shape[dim])\n if end is None:\n end = dim_size\n end = get_positive_dim(end, input_tensor.shape[dim])\n if step is None:\n step = 1\n\n src_dim = src_tensor.shape\n # step == 0 is not a valid torch case\n # also src_dim should be equal to slice dimension\n\n if start == 0 and end == dim_size and step == 1:\n return src_tensor\n\n cat_tensors = []\n index_tensor_shape = []\n for i, src_each_dim in enumerate(list(src_dim)):\n if i != dim:\n index_tensor_shape.append(src_each_dim)\n for index in range(start, end, step):\n cat_tensors.append(index * torch.ones(index_tensor_shape, dtype=torch.long))\n index_tensor = torch.stack(cat_tensors, dim).cuda()\n output_tensor = torch.scatter(input_tensor, dim, index_tensor, src_tensor)\n return output_tensor\n\n\ndef get_decompositions(\n enable_experimental_decompositions: bool = False,\n) -> Dict[OpOverload, Callable[[Any], Any]]:\n if enable_experimental_decompositions:\n CORE_ATEN_DECOMPOSITIONS_FILTERED: Dict[OpOverload, Callable[[Any], Any]] = {\n decomp: _core_aten_decompositions[decomp]\n for decomp in _core_aten_decompositions\n if decomp not in torch_disabled_decompositions\n }\n return {**CORE_ATEN_DECOMPOSITIONS_FILTERED, **TORCH_TRT_DECOMPOSITIONS}\n else:\n return {**ENABLED_TORCH_DECOMPOSITIONS, **TORCH_TRT_DECOMPOSITIONS}\n", "path": "py/torch_tensorrt/dynamo/lowering/_decompositions.py"}]}
| 2,499 | 544 |
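The golden diff above rewrites `aten.slice_scatter` as an index-plus-`scatter` sequence. A short sketch of the same idea with public PyTorch ops, useful for checking the decomposition by hand (the shapes, CPU device, and variable names here are chosen for illustration and are not part of the Torch-TensorRT test suite):

```python
import torch

inp = torch.zeros(8, 4)
src = torch.ones(3, 4)

# Reference: embed `src` into rows 1, 3, 5 of `inp`.
out = torch.slice_scatter(inp, src, dim=0, start=1, end=7, step=2)

# Same result via the index-building + scatter pattern used in the decomposition.
index = torch.stack(
    [i * torch.ones(4, dtype=torch.long) for i in range(1, 7, 2)], dim=0
)
out_scatter = torch.scatter(inp, 0, index, src)

assert torch.equal(out, out_scatter)
```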
gh_patches_debug_5802
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-4094
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Investigate creation of duplicate user accounts with differently cased emails
- [ ] Verify that lookups using email are using `__iexact` or something like that.
- [ ] Figure out a plan for existing duplicates
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4
5 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7
8 from django.conf import settings
9 from django.contrib.auth import get_user_model
10 from django.core.cache import cache
11 from django.utils.cache import get_cache_key, _generate_cache_header_key
12 from django.db import IntegrityError
13
14
15 def get_cached_data(request, key_prefix, data, serializer):
16 """Function to get serialized data from the cache based on the request."""
17 cache_header_key = _generate_cache_header_key(key_prefix, request)
18 if cache.get(cache_header_key) is None:
19 cache.set(cache_header_key, [], None)
20
21 cache_key = get_cache_key(request, key_prefix)
22 cached_data = cache.get(cache_key, None)
23 cache_used = True
24 if not cached_data and data is not None:
25 cache_used = False
26 cached_data = serializer(data, many=True).data
27 cache.set(cache_key, cached_data)
28
29 return cached_data, cache_used
30
31
32 def set_cached_data(request, key_prefix, data):
33 """Function to save data to the cache based on the request."""
34
35 cache_header_key = _generate_cache_header_key(key_prefix, request)
36 if cache.get(cache_header_key) is None:
37 cache.set(cache_header_key, [], None)
38
39 cache_key = get_cache_key(request, key_prefix)
40 cache.set(cache_key, data)
41
42
43 def get_qs_elements_for_page(qs, request, count):
44 """Return queryset elements to be shown on the current page"""
45 limit = int_or_none(request.GET.get('limit')) or settings.PROJECT_DIRECTORY_PAGE_SIZES[0]
46 limit = min(limit, settings.PROJECT_DIRECTORY_PAGE_SIZES[-1])
47 max_page_number = 1 + int(count / limit)
48 page_number = min(max_page_number, int_or_none(request.GET.get('page')) or 1)
49 start = (page_number - 1) * limit
50 end = page_number * limit
51 return qs[start:end]
52
53
54 def int_or_none(value):
55 """Return int or None given a value."""
56 try:
57 return int(value)
58 except Exception:
59 return None
60
61
62 def create_invited_user(email):
63 User = get_user_model()
64 # Check if the user already exists, based on the email address
65 try:
66 invited_user = User.objects.get(email=email)
67 except User.DoesNotExist:
68 try:
69 invited_user = User.objects.create_user(username=email, email=email)
70 except IntegrityError:
71 return None
72 return invited_user
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/views/utils.py b/akvo/rest/views/utils.py
--- a/akvo/rest/views/utils.py
+++ b/akvo/rest/views/utils.py
@@ -63,7 +63,7 @@
User = get_user_model()
# Check if the user already exists, based on the email address
try:
- invited_user = User.objects.get(email=email)
+ invited_user = User.objects.get(email__iexact=email)
except User.DoesNotExist:
try:
invited_user = User.objects.create_user(username=email, email=email)
|
{"golden_diff": "diff --git a/akvo/rest/views/utils.py b/akvo/rest/views/utils.py\n--- a/akvo/rest/views/utils.py\n+++ b/akvo/rest/views/utils.py\n@@ -63,7 +63,7 @@\n User = get_user_model()\n # Check if the user already exists, based on the email address\n try:\n- invited_user = User.objects.get(email=email)\n+ invited_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n try:\n invited_user = User.objects.create_user(username=email, email=email)\n", "issue": "Investigate creation of duplicate user accounts with differently cased emails\n- [ ] Verify that lookups using email are using `__iexact` or something like that. \n- [ ] Figure out a plan for existing duplicates\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.utils.cache import get_cache_key, _generate_cache_header_key\nfrom django.db import IntegrityError\n\n\ndef get_cached_data(request, key_prefix, data, serializer):\n \"\"\"Function to get serialized data from the cache based on the request.\"\"\"\n cache_header_key = _generate_cache_header_key(key_prefix, request)\n if cache.get(cache_header_key) is None:\n cache.set(cache_header_key, [], None)\n\n cache_key = get_cache_key(request, key_prefix)\n cached_data = cache.get(cache_key, None)\n cache_used = True\n if not cached_data and data is not None:\n cache_used = False\n cached_data = serializer(data, many=True).data\n cache.set(cache_key, cached_data)\n\n return cached_data, cache_used\n\n\ndef set_cached_data(request, key_prefix, data):\n \"\"\"Function to save data to the cache based on the request.\"\"\"\n\n cache_header_key = _generate_cache_header_key(key_prefix, request)\n if cache.get(cache_header_key) is None:\n cache.set(cache_header_key, [], None)\n\n cache_key = get_cache_key(request, key_prefix)\n cache.set(cache_key, data)\n\n\ndef get_qs_elements_for_page(qs, request, count):\n \"\"\"Return queryset elements to be shown on the current page\"\"\"\n limit = int_or_none(request.GET.get('limit')) or settings.PROJECT_DIRECTORY_PAGE_SIZES[0]\n limit = min(limit, settings.PROJECT_DIRECTORY_PAGE_SIZES[-1])\n max_page_number = 1 + int(count / limit)\n page_number = min(max_page_number, int_or_none(request.GET.get('page')) or 1)\n start = (page_number - 1) * limit\n end = page_number * limit\n return qs[start:end]\n\n\ndef int_or_none(value):\n \"\"\"Return int or None given a value.\"\"\"\n try:\n return int(value)\n except Exception:\n return None\n\n\ndef create_invited_user(email):\n User = get_user_model()\n # Check if the user already exists, based on the email address\n try:\n invited_user = User.objects.get(email=email)\n except User.DoesNotExist:\n try:\n invited_user = User.objects.create_user(username=email, email=email)\n except IntegrityError:\n return None\n return invited_user\n", "path": "akvo/rest/views/utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom 
django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.utils.cache import get_cache_key, _generate_cache_header_key\nfrom django.db import IntegrityError\n\n\ndef get_cached_data(request, key_prefix, data, serializer):\n \"\"\"Function to get serialized data from the cache based on the request.\"\"\"\n cache_header_key = _generate_cache_header_key(key_prefix, request)\n if cache.get(cache_header_key) is None:\n cache.set(cache_header_key, [], None)\n\n cache_key = get_cache_key(request, key_prefix)\n cached_data = cache.get(cache_key, None)\n cache_used = True\n if not cached_data and data is not None:\n cache_used = False\n cached_data = serializer(data, many=True).data\n cache.set(cache_key, cached_data)\n\n return cached_data, cache_used\n\n\ndef set_cached_data(request, key_prefix, data):\n \"\"\"Function to save data to the cache based on the request.\"\"\"\n\n cache_header_key = _generate_cache_header_key(key_prefix, request)\n if cache.get(cache_header_key) is None:\n cache.set(cache_header_key, [], None)\n\n cache_key = get_cache_key(request, key_prefix)\n cache.set(cache_key, data)\n\n\ndef get_qs_elements_for_page(qs, request, count):\n \"\"\"Return queryset elements to be shown on the current page\"\"\"\n limit = int_or_none(request.GET.get('limit')) or settings.PROJECT_DIRECTORY_PAGE_SIZES[0]\n limit = min(limit, settings.PROJECT_DIRECTORY_PAGE_SIZES[-1])\n max_page_number = 1 + int(count / limit)\n page_number = min(max_page_number, int_or_none(request.GET.get('page')) or 1)\n start = (page_number - 1) * limit\n end = page_number * limit\n return qs[start:end]\n\n\ndef int_or_none(value):\n \"\"\"Return int or None given a value.\"\"\"\n try:\n return int(value)\n except Exception:\n return None\n\n\ndef create_invited_user(email):\n User = get_user_model()\n # Check if the user already exists, based on the email address\n try:\n invited_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n try:\n invited_user = User.objects.create_user(username=email, email=email)\n except IntegrityError:\n return None\n return invited_user\n", "path": "akvo/rest/views/utils.py"}]}
| 1,028 | 124 |
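The golden diff above addresses the first checklist item (the `__iexact` lookup); the second item, existing duplicates, still needs a cleanup plan. A dependency-free sketch of how duplicate accounts could be detected by case-insensitive email (the sample addresses are invented):

```python
from collections import defaultdict

emails = ["Ana@example.org", "ana@example.org", "bob@example.org"]

by_canonical = defaultdict(list)
for e in emails:
    by_canonical[e.casefold()].append(e)

duplicates = {k: v for k, v in by_canonical.items() if len(v) > 1}
print(duplicates)
# {'ana@example.org': ['Ana@example.org', 'ana@example.org']}
```

In a real cleanup, the grouped accounts would then be merged or deactivated according to project policy.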
gh_patches_debug_7429
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-457
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elasticsearch Domain DomainName shouldn't be required
According to the CF documentation, `DomainName` isn't required: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/elasticsearch.py`
Content:
```
1 # Copyright (c) 2012-2015, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSProperty, AWSObject
7 from .validators import boolean, integer, integer_range, positive_integer
8
9 VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
10
11 try:
12 from awacs.aws import Policy
13 policytypes = (dict, Policy)
14 except ImportError:
15 policytypes = dict,
16
17
18 def validate_volume_type(volume_type):
19 """Validate VolumeType for ElasticsearchDomain"""
20 if volume_type not in VALID_VOLUME_TYPES:
21 raise ValueError("Elasticsearch Domain VolumeType must be one of: %s" %
22 ", ".join(VALID_VOLUME_TYPES))
23 return volume_type
24
25
26 class EBSOptions(AWSProperty):
27 props = {
28 'EBSEnabled': (boolean, False),
29 'Iops': (positive_integer, False),
30 'VolumeSize': (integer, False),
31 'VolumeType': (validate_volume_type, False)
32 }
33
34 def validate(self):
35 volume_type = self.properties.get('VolumeType')
36 iops = self.properties.get('Iops')
37 if volume_type == 'io1' and not iops:
38 raise ValueError("Must specify Iops if VolumeType is 'io1'.")
39
40
41 class ElasticsearchClusterConfig(AWSProperty):
42 props = {
43 'DedicatedMasterCount': (integer, False),
44 'DedicatedMasterEnabled': (boolean, False),
45 'DedicatedMasterType': (basestring, False),
46 'InstanceCount': (integer, False),
47 'InstanceType': (basestring, False),
48 'ZoneAwarenessEnabled': (boolean, False)
49 }
50
51
52 class SnapshotOptions(AWSProperty):
53 props = {
54 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)
55 }
56
57
58 class ElasticsearchDomain(AWSObject):
59 resource_type = "AWS::Elasticsearch::Domain"
60
61 props = {
62 'AccessPolicies': (policytypes, False),
63 'AdvancedOptions': (dict, False),
64 'DomainName': (basestring, True),
65 'EBSOptions': (EBSOptions, False),
66 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
67 'SnapshotOptions': (SnapshotOptions, False),
68 'Tags': (list, False)
69 }
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py
--- a/troposphere/elasticsearch.py
+++ b/troposphere/elasticsearch.py
@@ -61,7 +61,7 @@
props = {
'AccessPolicies': (policytypes, False),
'AdvancedOptions': (dict, False),
- 'DomainName': (basestring, True),
+ 'DomainName': (basestring, False),
'EBSOptions': (EBSOptions, False),
'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
'SnapshotOptions': (SnapshotOptions, False),
|
{"golden_diff": "diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py\n--- a/troposphere/elasticsearch.py\n+++ b/troposphere/elasticsearch.py\n@@ -61,7 +61,7 @@\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n- 'DomainName': (basestring, True),\n+ 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n", "issue": "Elasticsearch Domain DomainName shouldn't be required\nAccording to the CF documentation, `DomainName` isn't required: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html\n\n", "before_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSProperty, AWSObject\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass ElasticsearchDomain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'DomainName': (basestring, True),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': (list, False)\n }\n", "path": "troposphere/elasticsearch.py"}], "after_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSProperty, AWSObject\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass ElasticsearchDomain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': (list, False)\n }\n", "path": "troposphere/elasticsearch.py"}]}
| 960 | 139 |
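With the golden diff above applied, a template can declare a domain without `DomainName` and let CloudFormation generate one. A hedged usage sketch against the troposphere API shown in the listing (the resource title and instance settings are arbitrary):

```python
from troposphere import Template
from troposphere.elasticsearch import ElasticsearchClusterConfig, ElasticsearchDomain

t = Template()
t.add_resource(ElasticsearchDomain(
    "EsDomain",  # template-internal title only; no DomainName property set
    ElasticsearchClusterConfig=ElasticsearchClusterConfig(
        InstanceCount=2,
        InstanceType="t2.small.elasticsearch",
    ),
))

print(t.to_json())  # validates now that DomainName is optional
```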
gh_patches_debug_60691
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-text-524
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
text processing module with slow internet connection
[textprocessingerr.txt](https://github.com/biolab/orange3-text/files/4551540/textprocessingerr.txt)
Hi! I have a slow internet connection, less than 400Kbit/s, and when I use the text preprocessing module I get an error.
If I disconnect my internet connection (disable wifi), the text processing module works fine.
If I have an internet connection of more than 3Mbit/s, everything works fine.
##### Text version 0.9/0.8
##### Orange version 3.26/3.25
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/preprocess/normalize.py`
Content:
```
1 import os
2 import json
3 import ufal.udpipe as udpipe
4 import serverfiles
5 from nltk import stem
6 from requests.exceptions import ConnectionError
7 from Orange.misc.environ import data_dir
8
9
10 from orangecontrib.text.misc import wait_nltk_data
11
12 __all__ = ['BaseNormalizer', 'WordNetLemmatizer', 'PorterStemmer',
13 'SnowballStemmer', 'DictionaryLookupNormalizer',
14 'UDPipeLemmatizer']
15
16
17 class BaseNormalizer:
18 """ A generic normalizer class.
19 You should either overwrite `normalize` method or provide a custom
20 normalizer.
21
22 Attributes:
23 name(str): A short name for normalization method (will be used in OWPreprocessor)
24 normalizer(Callable): An callabale object to be used for normalization.
25
26 """
27 name = NotImplemented
28 normalizer = NotImplemented
29 str_format = '{self.name}'
30
31 def __call__(self, tokens):
32 """ Normalizes tokens to canonical form. """
33 if isinstance(tokens, str):
34 return self.normalize(tokens)
35 return [self.normalize(token) for token in tokens]
36
37 def normalize(self, token):
38 return self.normalizer(token)
39
40 def __str__(self):
41 return self.str_format.format(self=self)
42
43
44 class WordNetLemmatizer(BaseNormalizer):
45 name = 'WordNet Lemmatizer'
46 normalizer = stem.WordNetLemmatizer().lemmatize
47
48 @wait_nltk_data
49 def __init__(self):
50 super().__init__()
51
52
53 class DictionaryLookupNormalizer(BaseNormalizer):
54 """ Normalizes token with a <token: canonical_form> dictionary. """
55 name = 'Dictionary Lookup'
56
57 def __init__(self, dictionary):
58 super().__init__()
59 self.dictionary = dictionary
60
61 def normalize(self, token):
62 return self.dictionary.get(token, token)
63
64
65 class PorterStemmer(BaseNormalizer):
66 name = 'Porter Stemmer'
67 normalizer = stem.PorterStemmer().stem
68
69
70 class SnowballStemmer(BaseNormalizer):
71 name = 'Snowball Stemmer'
72 str_format = '{self.name} ({self.language})'
73 supported_languages = [l.capitalize() for l in stem.SnowballStemmer.languages]
74
75 def __init__(self, language='English'):
76 self._language = language
77 self.normalizer = stem.SnowballStemmer(self.language.lower())
78
79 def normalize(self, token):
80 return self.normalizer.stem(token)
81
82 @property
83 def language(self):
84 return self._language
85
86 @language.setter
87 def language(self, value):
88 self._language = value
89 self.normalizer = stem.SnowballStemmer(self.language.lower())
90
91
92 def language_to_name(language):
93 return language.lower().replace(' ', '') + 'ud'
94
95
96 def file_to_name(file):
97 return file.replace('-', '').replace('_', '')
98
99
100 def file_to_language(file):
101 return file[:file.find('ud')-1]\
102 .replace('-', ' ').replace('_', ' ').capitalize()
103
104
105 class UDPipeModels:
106 server_url = "http://file.biolab.si/files/udpipe/"
107
108 def __init__(self):
109 self.local_data = os.path.join(data_dir(versioned=False), 'udpipe/')
110 self.serverfiles = serverfiles.ServerFiles(self.server_url)
111 self.localfiles = serverfiles.LocalFiles(self.local_data,
112 serverfiles=self.serverfiles)
113 self._supported_languages = []
114
115 def __getitem__(self, language):
116 file_name = self._find_file(language_to_name(language))
117 return self.localfiles.localpath_download(file_name)
118
119 @property
120 def model_files(self):
121 try:
122 return self.serverfiles.listfiles()
123 except ConnectionError:
124 return self.localfiles.listfiles()
125
126 def _find_file(self, language):
127 return next(filter(lambda f: file_to_name(f).startswith(language),
128 map(lambda f: f[0], self.model_files)))
129
130 @property
131 def supported_languages(self):
132 self._supported_languages = list(map(lambda f: file_to_language(f[0]),
133 self.model_files))
134 return self._supported_languages
135
136 @property
137 def online(self):
138 try:
139 self.serverfiles.listfiles()
140 return True
141 except ConnectionError:
142 return False
143
144
145 class UDPipeLemmatizer(BaseNormalizer):
146 name = 'UDPipe Lemmatizer'
147 str_format = '{self.name} ({self.language})'
148
149 def __init__(self, language='English'):
150 self._language = language
151 self.models = UDPipeModels()
152 self.model = None
153 self.output_format = udpipe.OutputFormat.newOutputFormat('epe')
154 self.use_tokenizer = False
155
156 def load_model(self):
157 if self.model is None:
158 self.model = udpipe.Model.load(self.models[self._language])
159
160 def normalize(self, token):
161 self.load_model()
162 sentence = udpipe.Sentence()
163 sentence.addWord(token)
164 self.model.tag(sentence, self.model.DEFAULT)
165 output = self.output_format.writeSentence(sentence)
166 return json.loads(output)['nodes'][0]['properties']['lemma']
167
168 def normalize_doc(self, document):
169 self.load_model()
170 tokens = []
171 tokenizer = self.model.newTokenizer(self.model.DEFAULT)
172 tokenizer.setText(document)
173 error = udpipe.ProcessingError()
174 sentence = udpipe.Sentence()
175 while tokenizer.nextSentence(sentence, error):
176 self.model.tag(sentence, self.model.DEFAULT)
177 output = self.output_format.writeSentence(sentence)
178 sentence = udpipe.Sentence()
179 tokens.extend([t['properties']['lemma']
180 for t in json.loads(output)['nodes']])
181 return tokens
182
183 @property
184 def language(self):
185 return self._language
186
187 @language.setter
188 def language(self, value):
189 self._language = value
190 self.model = None
191
192 def __getstate__(self):
193 return {'language': self.language}
194
195 def __setstate__(self, state):
196 self.__init__(state['language'])
197
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/orangecontrib/text/preprocess/normalize.py b/orangecontrib/text/preprocess/normalize.py
--- a/orangecontrib/text/preprocess/normalize.py
+++ b/orangecontrib/text/preprocess/normalize.py
@@ -103,7 +103,7 @@
class UDPipeModels:
- server_url = "http://file.biolab.si/files/udpipe/"
+ server_url = "https://file.biolab.si/files/udpipe/"
def __init__(self):
self.local_data = os.path.join(data_dir(versioned=False), 'udpipe/')
|
{"golden_diff": "diff --git a/orangecontrib/text/preprocess/normalize.py b/orangecontrib/text/preprocess/normalize.py\n--- a/orangecontrib/text/preprocess/normalize.py\n+++ b/orangecontrib/text/preprocess/normalize.py\n@@ -103,7 +103,7 @@\n \n \n class UDPipeModels:\n- server_url = \"http://file.biolab.si/files/udpipe/\"\n+ server_url = \"https://file.biolab.si/files/udpipe/\"\n \n def __init__(self):\n self.local_data = os.path.join(data_dir(versioned=False), 'udpipe/')\n", "issue": "text processing module with slow internet conection\n[textprocessingerr.txt](https://github.com/biolab/orange3-text/files/4551540/textprocessingerr.txt)\r\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\nHi! I have slow internet connection, less than 400Kbit/s. And when I use text preprocessing module I have an error.\r\nIf I disconnect my internet connection (disable wifi), the text processing module works fine.\r\nIf I have have internet connection more than 3Mbit/s, everything works fine.\r\n##### Text version 0.9/0.8\r\n##### Orange version 3.26/3.25\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport json\nimport ufal.udpipe as udpipe\nimport serverfiles\nfrom nltk import stem\nfrom requests.exceptions import ConnectionError\nfrom Orange.misc.environ import data_dir\n\n\nfrom orangecontrib.text.misc import wait_nltk_data\n\n__all__ = ['BaseNormalizer', 'WordNetLemmatizer', 'PorterStemmer',\n 'SnowballStemmer', 'DictionaryLookupNormalizer',\n 'UDPipeLemmatizer']\n\n\nclass BaseNormalizer:\n \"\"\" A generic normalizer class.\n You should either overwrite `normalize` method or provide a custom\n normalizer.\n\n Attributes:\n name(str): A short name for normalization method (will be used in OWPreprocessor)\n normalizer(Callable): An callabale object to be used for normalization.\n\n \"\"\"\n name = NotImplemented\n normalizer = NotImplemented\n str_format = '{self.name}'\n\n def __call__(self, tokens):\n \"\"\" Normalizes tokens to canonical form. \"\"\"\n if isinstance(tokens, str):\n return self.normalize(tokens)\n return [self.normalize(token) for token in tokens]\n\n def normalize(self, token):\n return self.normalizer(token)\n\n def __str__(self):\n return self.str_format.format(self=self)\n\n\nclass WordNetLemmatizer(BaseNormalizer):\n name = 'WordNet Lemmatizer'\n normalizer = stem.WordNetLemmatizer().lemmatize\n\n @wait_nltk_data\n def __init__(self):\n super().__init__()\n\n\nclass DictionaryLookupNormalizer(BaseNormalizer):\n \"\"\" Normalizes token with a <token: canonical_form> dictionary. 
\"\"\"\n name = 'Dictionary Lookup'\n\n def __init__(self, dictionary):\n super().__init__()\n self.dictionary = dictionary\n\n def normalize(self, token):\n return self.dictionary.get(token, token)\n\n\nclass PorterStemmer(BaseNormalizer):\n name = 'Porter Stemmer'\n normalizer = stem.PorterStemmer().stem\n\n\nclass SnowballStemmer(BaseNormalizer):\n name = 'Snowball Stemmer'\n str_format = '{self.name} ({self.language})'\n supported_languages = [l.capitalize() for l in stem.SnowballStemmer.languages]\n\n def __init__(self, language='English'):\n self._language = language\n self.normalizer = stem.SnowballStemmer(self.language.lower())\n\n def normalize(self, token):\n return self.normalizer.stem(token)\n\n @property\n def language(self):\n return self._language\n\n @language.setter\n def language(self, value):\n self._language = value\n self.normalizer = stem.SnowballStemmer(self.language.lower())\n\n\ndef language_to_name(language):\n return language.lower().replace(' ', '') + 'ud'\n\n\ndef file_to_name(file):\n return file.replace('-', '').replace('_', '')\n\n\ndef file_to_language(file):\n return file[:file.find('ud')-1]\\\n .replace('-', ' ').replace('_', ' ').capitalize()\n\n\nclass UDPipeModels:\n server_url = \"http://file.biolab.si/files/udpipe/\"\n\n def __init__(self):\n self.local_data = os.path.join(data_dir(versioned=False), 'udpipe/')\n self.serverfiles = serverfiles.ServerFiles(self.server_url)\n self.localfiles = serverfiles.LocalFiles(self.local_data,\n serverfiles=self.serverfiles)\n self._supported_languages = []\n\n def __getitem__(self, language):\n file_name = self._find_file(language_to_name(language))\n return self.localfiles.localpath_download(file_name)\n\n @property\n def model_files(self):\n try:\n return self.serverfiles.listfiles()\n except ConnectionError:\n return self.localfiles.listfiles()\n\n def _find_file(self, language):\n return next(filter(lambda f: file_to_name(f).startswith(language),\n map(lambda f: f[0], self.model_files)))\n\n @property\n def supported_languages(self):\n self._supported_languages = list(map(lambda f: file_to_language(f[0]),\n self.model_files))\n return self._supported_languages\n\n @property\n def online(self):\n try:\n self.serverfiles.listfiles()\n return True\n except ConnectionError:\n return False\n\n\nclass UDPipeLemmatizer(BaseNormalizer):\n name = 'UDPipe Lemmatizer'\n str_format = '{self.name} ({self.language})'\n\n def __init__(self, language='English'):\n self._language = language\n self.models = UDPipeModels()\n self.model = None\n self.output_format = udpipe.OutputFormat.newOutputFormat('epe')\n self.use_tokenizer = False\n\n def load_model(self):\n if self.model is None:\n self.model = udpipe.Model.load(self.models[self._language])\n\n def normalize(self, token):\n self.load_model()\n sentence = udpipe.Sentence()\n sentence.addWord(token)\n self.model.tag(sentence, self.model.DEFAULT)\n output = self.output_format.writeSentence(sentence)\n return json.loads(output)['nodes'][0]['properties']['lemma']\n\n def normalize_doc(self, document):\n self.load_model()\n tokens = []\n tokenizer = self.model.newTokenizer(self.model.DEFAULT)\n tokenizer.setText(document)\n error = udpipe.ProcessingError()\n sentence = udpipe.Sentence()\n while tokenizer.nextSentence(sentence, error):\n self.model.tag(sentence, self.model.DEFAULT)\n output = self.output_format.writeSentence(sentence)\n sentence = udpipe.Sentence()\n tokens.extend([t['properties']['lemma']\n for t in json.loads(output)['nodes']])\n return tokens\n\n @property\n 
def language(self):\n return self._language\n\n @language.setter\n def language(self, value):\n self._language = value\n self.model = None\n\n def __getstate__(self):\n return {'language': self.language}\n\n def __setstate__(self, state):\n self.__init__(state['language'])\n\n", "path": "orangecontrib/text/preprocess/normalize.py"}], "after_files": [{"content": "import os\nimport json\nimport ufal.udpipe as udpipe\nimport serverfiles\nfrom nltk import stem\nfrom requests.exceptions import ConnectionError\nfrom Orange.misc.environ import data_dir\n\n\nfrom orangecontrib.text.misc import wait_nltk_data\n\n__all__ = ['BaseNormalizer', 'WordNetLemmatizer', 'PorterStemmer',\n 'SnowballStemmer', 'DictionaryLookupNormalizer',\n 'UDPipeLemmatizer']\n\n\nclass BaseNormalizer:\n \"\"\" A generic normalizer class.\n You should either overwrite `normalize` method or provide a custom\n normalizer.\n\n Attributes:\n name(str): A short name for normalization method (will be used in OWPreprocessor)\n normalizer(Callable): An callabale object to be used for normalization.\n\n \"\"\"\n name = NotImplemented\n normalizer = NotImplemented\n str_format = '{self.name}'\n\n def __call__(self, tokens):\n \"\"\" Normalizes tokens to canonical form. \"\"\"\n if isinstance(tokens, str):\n return self.normalize(tokens)\n return [self.normalize(token) for token in tokens]\n\n def normalize(self, token):\n return self.normalizer(token)\n\n def __str__(self):\n return self.str_format.format(self=self)\n\n\nclass WordNetLemmatizer(BaseNormalizer):\n name = 'WordNet Lemmatizer'\n normalizer = stem.WordNetLemmatizer().lemmatize\n\n @wait_nltk_data\n def __init__(self):\n super().__init__()\n\n\nclass DictionaryLookupNormalizer(BaseNormalizer):\n \"\"\" Normalizes token with a <token: canonical_form> dictionary. 
\"\"\"\n name = 'Dictionary Lookup'\n\n def __init__(self, dictionary):\n super().__init__()\n self.dictionary = dictionary\n\n def normalize(self, token):\n return self.dictionary.get(token, token)\n\n\nclass PorterStemmer(BaseNormalizer):\n name = 'Porter Stemmer'\n normalizer = stem.PorterStemmer().stem\n\n\nclass SnowballStemmer(BaseNormalizer):\n name = 'Snowball Stemmer'\n str_format = '{self.name} ({self.language})'\n supported_languages = [l.capitalize() for l in stem.SnowballStemmer.languages]\n\n def __init__(self, language='English'):\n self._language = language\n self.normalizer = stem.SnowballStemmer(self.language.lower())\n\n def normalize(self, token):\n return self.normalizer.stem(token)\n\n @property\n def language(self):\n return self._language\n\n @language.setter\n def language(self, value):\n self._language = value\n self.normalizer = stem.SnowballStemmer(self.language.lower())\n\n\ndef language_to_name(language):\n return language.lower().replace(' ', '') + 'ud'\n\n\ndef file_to_name(file):\n return file.replace('-', '').replace('_', '')\n\n\ndef file_to_language(file):\n return file[:file.find('ud')-1]\\\n .replace('-', ' ').replace('_', ' ').capitalize()\n\n\nclass UDPipeModels:\n server_url = \"https://file.biolab.si/files/udpipe/\"\n\n def __init__(self):\n self.local_data = os.path.join(data_dir(versioned=False), 'udpipe/')\n self.serverfiles = serverfiles.ServerFiles(self.server_url)\n self.localfiles = serverfiles.LocalFiles(self.local_data,\n serverfiles=self.serverfiles)\n self._supported_languages = []\n\n def __getitem__(self, language):\n file_name = self._find_file(language_to_name(language))\n return self.localfiles.localpath_download(file_name)\n\n @property\n def model_files(self):\n try:\n return self.serverfiles.listfiles()\n except ConnectionError:\n return self.localfiles.listfiles()\n\n def _find_file(self, language):\n return next(filter(lambda f: file_to_name(f).startswith(language),\n map(lambda f: f[0], self.model_files)))\n\n @property\n def supported_languages(self):\n self._supported_languages = list(map(lambda f: file_to_language(f[0]),\n self.model_files))\n return self._supported_languages\n\n @property\n def online(self):\n try:\n self.serverfiles.listfiles()\n return True\n except ConnectionError:\n return False\n\n\nclass UDPipeLemmatizer(BaseNormalizer):\n name = 'UDPipe Lemmatizer'\n str_format = '{self.name} ({self.language})'\n\n def __init__(self, language='English'):\n self._language = language\n self.models = UDPipeModels()\n self.model = None\n self.output_format = udpipe.OutputFormat.newOutputFormat('epe')\n self.use_tokenizer = False\n\n def load_model(self):\n if self.model is None:\n self.model = udpipe.Model.load(self.models[self._language])\n\n def normalize(self, token):\n self.load_model()\n sentence = udpipe.Sentence()\n sentence.addWord(token)\n self.model.tag(sentence, self.model.DEFAULT)\n output = self.output_format.writeSentence(sentence)\n return json.loads(output)['nodes'][0]['properties']['lemma']\n\n def normalize_doc(self, document):\n self.load_model()\n tokens = []\n tokenizer = self.model.newTokenizer(self.model.DEFAULT)\n tokenizer.setText(document)\n error = udpipe.ProcessingError()\n sentence = udpipe.Sentence()\n while tokenizer.nextSentence(sentence, error):\n self.model.tag(sentence, self.model.DEFAULT)\n output = self.output_format.writeSentence(sentence)\n sentence = udpipe.Sentence()\n tokens.extend([t['properties']['lemma']\n for t in json.loads(output)['nodes']])\n return tokens\n\n @property\n 
def language(self):\n return self._language\n\n @language.setter\n def language(self, value):\n self._language = value\n self.model = None\n\n def __getstate__(self):\n return {'language': self.language}\n\n def __setstate__(self, state):\n self.__init__(state['language'])\n\n", "path": "orangecontrib/text/preprocess/normalize.py"}]}
| 2,210 | 130 |
gh_patches_debug_6167
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-2462
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MSI installed meson fails to rerun in visual studio
Initially, I ran `meson build` from the source code directory `xxx` to create the build directory.
Later, if any `meson.build` files are modified, Visual studio fails to rerun Meson with the backtrace below. Meson is installed with MSI. It works with ninja as backend. It also works if meson isn't installed with MSI.
It seems like `mesonscript` in `regen_checker` is invalid when meson is installed with MSI.
```
>meson.exe : error : unrecognized arguments: --internal regenerate C:\Users\niklas\Documents\git\xxx C:\Users\niklas\Documents\git\xxx
1> Traceback (most recent call last):
1> File "C:\Users\niklas\AppData\Local\Programs\Python\Python36-32\lib\site-packages\cx_Freeze\initscripts\__startup__.py", line 14, in run
1> module.run()
1> File "C:\Users\niklas\AppData\Local\Programs\Python\Python36-32\lib\site-packages\cx_Freeze\initscripts\Console.py", line 26, in run
1> exec(code, m.__dict__)
1> File "meson.py", line 37, in <module>
1> File "meson.py", line 34, in main
1> File "mesonbuild\mesonmain.py", line 311, in run
1> File "mesonbuild\mesonmain.py", line 278, in run_script_command
1> File "mesonbuild\scripts\regen_checker.py", line 56, in run
1> File "mesonbuild\scripts\regen_checker.py", line 42, in regen
1> File "C:\Users\niklas\AppData\Local\Programs\Python\Python36-32\lib\subprocess.py", line 291, in check_call
1> raise CalledProcessError(retcode, cmd)
1> subprocess.CalledProcessError: Command '['C:\\Program Files\\Meson\\meson.exe', 'C:\\Users\\niklas\\Documents\\git\\xxx\\meson', '--internal', 'regenerate', 'C:\\Users\\niklas\\Documents\\git\\xxx\\build', 'C:\\Users\\niklas\\Documents\\git\\xxx', '--backend=vs2015']' returned non-zero exit status 2.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/scripts/regen_checker.py`
Content:
```
1 # Copyright 2015-2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys, os
16 import pickle, subprocess
17
18 # This could also be used for XCode.
19
20 def need_regen(regeninfo, regen_timestamp):
21 for i in regeninfo.depfiles:
22 curfile = os.path.join(regeninfo.build_dir, i)
23 curtime = os.stat(curfile).st_mtime
24 if curtime > regen_timestamp:
25 return True
26 # The timestamp file gets automatically deleted by MSBuild during a 'Clean' build.
27 # We must make sure to recreate it, even if we do not regenerate the solution.
28 # Otherwise, Visual Studio will always consider the REGEN project out of date.
29 print("Everything is up-to-date, regeneration of build files is not needed.")
30 from ..backend.vs2010backend import Vs2010Backend
31 Vs2010Backend.touch_regen_timestamp(regeninfo.build_dir)
32 return False
33
34 def regen(regeninfo, mesonscript, backend):
35 cmd = [sys.executable,
36 mesonscript,
37 '--internal',
38 'regenerate',
39 regeninfo.build_dir,
40 regeninfo.source_dir,
41 '--backend=' + backend]
42 subprocess.check_call(cmd)
43
44 def run(args):
45 private_dir = args[0]
46 dumpfile = os.path.join(private_dir, 'regeninfo.dump')
47 coredata = os.path.join(private_dir, 'coredata.dat')
48 with open(dumpfile, 'rb') as f:
49 regeninfo = pickle.load(f)
50 with open(coredata, 'rb') as f:
51 coredata = pickle.load(f)
52 mesonscript = coredata.meson_script_launcher
53 backend = coredata.get_builtin_option('backend')
54 regen_timestamp = os.stat(dumpfile).st_mtime
55 if need_regen(regeninfo, regen_timestamp):
56 regen(regeninfo, mesonscript, backend)
57 sys.exit(0)
58
59 if __name__ == '__main__':
60 run(sys.argv[1:])
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesonbuild/scripts/regen_checker.py b/mesonbuild/scripts/regen_checker.py
--- a/mesonbuild/scripts/regen_checker.py
+++ b/mesonbuild/scripts/regen_checker.py
@@ -32,9 +32,11 @@
return False
def regen(regeninfo, mesonscript, backend):
- cmd = [sys.executable,
- mesonscript,
- '--internal',
+ if sys.executable.lower().endswith('meson.exe'):
+ cmd_exe = [sys.executable]
+ else:
+ cmd_exe = [sys.executable, mesonscript]
+ cmd = cmd_exe + ['--internal',
'regenerate',
regeninfo.build_dir,
regeninfo.source_dir,
|
{"golden_diff": "diff --git a/mesonbuild/scripts/regen_checker.py b/mesonbuild/scripts/regen_checker.py\n--- a/mesonbuild/scripts/regen_checker.py\n+++ b/mesonbuild/scripts/regen_checker.py\n@@ -32,9 +32,11 @@\n return False\n \n def regen(regeninfo, mesonscript, backend):\n- cmd = [sys.executable,\n- mesonscript,\n- '--internal',\n+ if sys.executable.lower().endswith('meson.exe'):\n+ cmd_exe = [sys.executable]\n+ else:\n+ cmd_exe = [sys.executable, mesonscript]\n+ cmd = cmd_exe + ['--internal',\n 'regenerate',\n regeninfo.build_dir,\n regeninfo.source_dir,\n", "issue": "MSI installed meson fails to rerun in visual studio\nInitially, I ran `meson build` from the source code directory `xxx` to create the build directory.\r\nLater, if any `meson.build` files are modified, Visual studio fails to rerun Meson with the backtrace below. Meson is installed with MSI. It works with ninja as backend. It also works if meson isn't installed with MSI.\r\n\r\nIt seems like `mesonscript` in `regen_checker` is invalid when meson is installed with MSI.\r\n\r\n```\r\n>meson.exe : error : unrecognized arguments: --internal regenerate C:\\Users\\niklas\\Documents\\git\\xxx C:\\Users\\niklas\\Documents\\git\\xxx\r\n1> Traceback (most recent call last):\r\n1> File \"C:\\Users\\niklas\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\cx_Freeze\\initscripts\\__startup__.py\", line 14, in run\r\n1> module.run()\r\n1> File \"C:\\Users\\niklas\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\cx_Freeze\\initscripts\\Console.py\", line 26, in run\r\n1> exec(code, m.__dict__)\r\n1> File \"meson.py\", line 37, in <module>\r\n1> File \"meson.py\", line 34, in main\r\n1> File \"mesonbuild\\mesonmain.py\", line 311, in run\r\n1> File \"mesonbuild\\mesonmain.py\", line 278, in run_script_command\r\n1> File \"mesonbuild\\scripts\\regen_checker.py\", line 56, in run\r\n1> File \"mesonbuild\\scripts\\regen_checker.py\", line 42, in regen\r\n1> File \"C:\\Users\\niklas\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\subprocess.py\", line 291, in check_call\r\n1> raise CalledProcessError(retcode, cmd)\r\n1> subprocess.CalledProcessError: Command '['C:\\\\Program Files\\\\Meson\\\\meson.exe', 'C:\\\\Users\\\\niklas\\\\Documents\\\\git\\\\xxx\\\\meson', '--internal', 'regenerate', 'C:\\\\Users\\\\niklas\\\\Documents\\\\git\\\\xxx\\\\build', 'C:\\\\Users\\\\niklas\\\\Documents\\\\git\\\\xxx', '--backend=vs2015']' returned non-zero exit status 2.\r\n```\n", "before_files": [{"content": "# Copyright 2015-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os\nimport pickle, subprocess\n\n# This could also be used for XCode.\n\ndef need_regen(regeninfo, regen_timestamp):\n for i in regeninfo.depfiles:\n curfile = os.path.join(regeninfo.build_dir, i)\n curtime = os.stat(curfile).st_mtime\n if curtime > regen_timestamp:\n return True\n # The timestamp file gets automatically deleted by MSBuild during a 'Clean' build.\n # We must make sure to recreate it, 
even if we do not regenerate the solution.\n # Otherwise, Visual Studio will always consider the REGEN project out of date.\n print(\"Everything is up-to-date, regeneration of build files is not needed.\")\n from ..backend.vs2010backend import Vs2010Backend\n Vs2010Backend.touch_regen_timestamp(regeninfo.build_dir)\n return False\n\ndef regen(regeninfo, mesonscript, backend):\n cmd = [sys.executable,\n mesonscript,\n '--internal',\n 'regenerate',\n regeninfo.build_dir,\n regeninfo.source_dir,\n '--backend=' + backend]\n subprocess.check_call(cmd)\n\ndef run(args):\n private_dir = args[0]\n dumpfile = os.path.join(private_dir, 'regeninfo.dump')\n coredata = os.path.join(private_dir, 'coredata.dat')\n with open(dumpfile, 'rb') as f:\n regeninfo = pickle.load(f)\n with open(coredata, 'rb') as f:\n coredata = pickle.load(f)\n mesonscript = coredata.meson_script_launcher\n backend = coredata.get_builtin_option('backend')\n regen_timestamp = os.stat(dumpfile).st_mtime\n if need_regen(regeninfo, regen_timestamp):\n regen(regeninfo, mesonscript, backend)\n sys.exit(0)\n\nif __name__ == '__main__':\n run(sys.argv[1:])\n", "path": "mesonbuild/scripts/regen_checker.py"}], "after_files": [{"content": "# Copyright 2015-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os\nimport pickle, subprocess\n\n# This could also be used for XCode.\n\ndef need_regen(regeninfo, regen_timestamp):\n for i in regeninfo.depfiles:\n curfile = os.path.join(regeninfo.build_dir, i)\n curtime = os.stat(curfile).st_mtime\n if curtime > regen_timestamp:\n return True\n # The timestamp file gets automatically deleted by MSBuild during a 'Clean' build.\n # We must make sure to recreate it, even if we do not regenerate the solution.\n # Otherwise, Visual Studio will always consider the REGEN project out of date.\n print(\"Everything is up-to-date, regeneration of build files is not needed.\")\n from ..backend.vs2010backend import Vs2010Backend\n Vs2010Backend.touch_regen_timestamp(regeninfo.build_dir)\n return False\n\ndef regen(regeninfo, mesonscript, backend):\n if sys.executable.lower().endswith('meson.exe'):\n cmd_exe = [sys.executable]\n else:\n cmd_exe = [sys.executable, mesonscript]\n cmd = cmd_exe + ['--internal',\n 'regenerate',\n regeninfo.build_dir,\n regeninfo.source_dir,\n '--backend=' + backend]\n subprocess.check_call(cmd)\n\ndef run(args):\n private_dir = args[0]\n dumpfile = os.path.join(private_dir, 'regeninfo.dump')\n coredata = os.path.join(private_dir, 'coredata.dat')\n with open(dumpfile, 'rb') as f:\n regeninfo = pickle.load(f)\n with open(coredata, 'rb') as f:\n coredata = pickle.load(f)\n mesonscript = coredata.meson_script_launcher\n backend = coredata.get_builtin_option('backend')\n regen_timestamp = os.stat(dumpfile).st_mtime\n if need_regen(regeninfo, regen_timestamp):\n regen(regeninfo, mesonscript, backend)\n sys.exit(0)\n\nif __name__ == '__main__':\n run(sys.argv[1:])\n", "path": "mesonbuild/scripts/regen_checker.py"}]}
| 1,510 | 167 |
gh_patches_debug_5400
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2874
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tgifridays is broken
During the global build at 2021-05-26-14-42-23, spider **tgifridays** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tgifridays.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tgifridays.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import datetime
3 import re
4 import json
5
6 import scrapy
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 DAY_MAPPING = {
12 'Monday': 'Mo',
13 'Tuesday': 'Tu',
14 'Wednesday': 'We',
15 'Thursday': 'Th',
16 'Friday': 'Fr',
17 'Saturday': 'Sa',
18 'Sunday': 'Su'
19 }
20
21
22 class TGIFridaySpider(scrapy.Spider):
23 download_delay = 0.2
24 name = "tgifridays"
25 item_attributes = { 'brand': "TGI Friday's" }
26 allowed_domains = ["tgifridays.com"]
27 start_urls = (
28 'https://locations.tgifridays.com/sitemap.xml',
29 )
30
31 def parse_hours(self, hours):
32 opening_hours = OpeningHours()
33
34 for hour in hours:
35 if hour["opens"] == "Closed":
36 continue
37 elif hour["closes"] == "Closed":
38 continue
39 else:
40 opening_hours.add_range(
41 day=hour["dayOfWeek"].replace('http://schema.org/', '')[:2],
42 open_time=hour["opens"],
43 close_time=hour["closes"],
44 time_format='%I:%M%p',
45 )
46
47 return opening_hours.as_opening_hours()
48
49 def parse_store(self, response):
50 # The JSON blob has an extra "}\r\n" at the end
51 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first()[:-3])
52
53 properties = {
54 'addr_full': data['address']['streetAddress'],
55 'phone': data['telephone'],
56 'city': data['address']['addressLocality'],
57 'state': data['address']['addressRegion'],
58 'postcode': data['address']['postalCode'],
59 'country': data['address']['addressCountry'],
60 'ref': data['@id'],
61 'website': data['url'],
62 'lat': data['geo']['latitude'],
63 'lon': data['geo']['longitude'],
64 'name': data['name'],
65 }
66
67 hours = self.parse_hours(data.get("openingHoursSpecification", []))
68 if hours:
69 properties["opening_hours"] = hours
70
71 yield GeojsonPointItem(**properties)
72
73 def parse(self, response):
74 response.selector.remove_namespaces()
75 city_urls = response.xpath('//url/loc/text()').extract()
76 for path in city_urls:
77 if path.count('/') == 5:
78 yield scrapy.Request(
79 path.strip(),
80 callback=self.parse_store,
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/tgifridays.py b/locations/spiders/tgifridays.py
--- a/locations/spiders/tgifridays.py
+++ b/locations/spiders/tgifridays.py
@@ -32,9 +32,9 @@
opening_hours = OpeningHours()
for hour in hours:
- if hour["opens"] == "Closed":
+ if hour["opens"] in ("Closed", ""):
continue
- elif hour["closes"] == "Closed":
+ elif hour["closes"] in ("Closed", ""):
continue
else:
opening_hours.add_range(
|
{"golden_diff": "diff --git a/locations/spiders/tgifridays.py b/locations/spiders/tgifridays.py\n--- a/locations/spiders/tgifridays.py\n+++ b/locations/spiders/tgifridays.py\n@@ -32,9 +32,9 @@\n opening_hours = OpeningHours()\n \n for hour in hours:\n- if hour[\"opens\"] == \"Closed\":\n+ if hour[\"opens\"] in (\"Closed\", \"\"):\n continue\n- elif hour[\"closes\"] == \"Closed\":\n+ elif hour[\"closes\"] in (\"Closed\", \"\"):\n continue\n else:\n opening_hours.add_range(\n", "issue": "Spider tgifridays is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tgifridays** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tgifridays.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport re\nimport json\n\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass TGIFridaySpider(scrapy.Spider):\n download_delay = 0.2\n name = \"tgifridays\"\n item_attributes = { 'brand': \"TGI Friday's\" }\n allowed_domains = [\"tgifridays.com\"]\n start_urls = (\n 'https://locations.tgifridays.com/sitemap.xml',\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n if hour[\"opens\"] == \"Closed\":\n continue\n elif hour[\"closes\"] == \"Closed\":\n continue\n else:\n opening_hours.add_range(\n day=hour[\"dayOfWeek\"].replace('http://schema.org/', '')[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n time_format='%I:%M%p',\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n # The JSON blob has an extra \"}\\r\\n\" at the end\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()[:-3])\n\n properties = {\n 'addr_full': data['address']['streetAddress'],\n 'phone': data['telephone'],\n 'city': data['address']['addressLocality'],\n 'state': data['address']['addressRegion'],\n 'postcode': data['address']['postalCode'],\n 'country': data['address']['addressCountry'],\n 'ref': data['@id'],\n 'website': data['url'],\n 'lat': data['geo']['latitude'],\n 'lon': data['geo']['longitude'],\n 'name': data['name'],\n }\n\n hours = self.parse_hours(data.get(\"openingHoursSpecification\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.count('/') == 5:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n", "path": "locations/spiders/tgifridays.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport re\nimport json\n\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass TGIFridaySpider(scrapy.Spider):\n download_delay = 0.2\n name = \"tgifridays\"\n 
item_attributes = { 'brand': \"TGI Friday's\" }\n allowed_domains = [\"tgifridays.com\"]\n start_urls = (\n 'https://locations.tgifridays.com/sitemap.xml',\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n if hour[\"opens\"] in (\"Closed\", \"\"):\n continue\n elif hour[\"closes\"] in (\"Closed\", \"\"):\n continue\n else:\n opening_hours.add_range(\n day=hour[\"dayOfWeek\"].replace('http://schema.org/', '')[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n time_format='%I:%M%p',\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n # The JSON blob has an extra \"}\\r\\n\" at the end\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()[:-3])\n\n properties = {\n 'addr_full': data['address']['streetAddress'],\n 'phone': data['telephone'],\n 'city': data['address']['addressLocality'],\n 'state': data['address']['addressRegion'],\n 'postcode': data['address']['postalCode'],\n 'country': data['address']['addressCountry'],\n 'ref': data['@id'],\n 'website': data['url'],\n 'lat': data['geo']['latitude'],\n 'lon': data['geo']['longitude'],\n 'name': data['name'],\n }\n\n hours = self.parse_hours(data.get(\"openingHoursSpecification\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.count('/') == 5:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n", "path": "locations/spiders/tgifridays.py"}]}
| 1,164 | 139 |
gh_patches_debug_30186
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-5968
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IRIS SJI maps call undefined header in self.wavelength
### Describe the bug
When creating SJI maps by feeding data and header separately into `Map()` and then calling `.plot()`, an error is raised because the map source references an undefined `header` variable.
### To Reproduce
import glob
from [astropy.io](http://astropy.io/) import fits
from [sunpy.map](http://sunpy.map/) import Map
data_file = glob.glob('IRIS/*_SJI_2832_*fits')
data_file.sort()
hdul = [fits.open](http://fits.open/)(data_file[0])
header = hdul[0].header
data = hdul[0].data
Map(data[0], header).plot()
### What happened?
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
/tmp/ipykernel_73554/1651218312.py in <module>
7 data = hdul[0].data
8
----> 9 Map(data[0], header).plot()
~/SunEnvironment/lib64/python3.8/site-packages/astropy/units/decorators.py in wrapper(*func_args, **func_kwargs)
251 # Call the original function with any equivalencies in force.
252 with add_enabled_equivalencies(self.equivalencies):
--> 253 return_ = wrapped_function(*func_args, **func_kwargs)
254
255 valid_empty = (inspect.Signature.empty, None)
~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/mapbase.py in plot(self, annotate, axes, title, autoalign, clip_interval, **imshow_kwargs)
2406 plot_settings_title = plot_settings.pop('title')
2407 else:
-> 2408 plot_settings_title = self.latex_name
2409
2410 # Anything left in plot_settings is given to imshow
~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/mapbase.py in latex_name(self)
735 def latex_name(self):
736 """LaTeX formatted description of the Map."""
--> 737 if isinstance(self.measurement, u.Quantity):
738 return self._base_name().format(measurement=self.measurement._repr_latex_())
739 else:
~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/mapbase.py in measurement(self)
898 defaults to dimensionless units.
899 """
--> 900 return self.wavelength
901
902 @property
~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/sources/iris.py in wavelength(self)
61 Taken from WAVELNTH, or if not present TWAVE1.
62 """
---> 63 return header.get('wavelnth', header.get('twave1')) * self.waveunit
64
65 @classmethod
NameError: name 'header' is not defined
### Expected behavior
_No response_
### Screenshots

### System Details
sunpy.__version__ : 3.1.3
astropy.__version__: 4.3.1
### Installation method
pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/map/sources/iris.py`
Content:
```
1
2 import astropy.units as u
3
4 from sunpy.map.mapbase import GenericMap, SpatialPair
5
6 __all__ = ['SJIMap']
7
8
9 class SJIMap(GenericMap):
10 """
11 A 2D IRIS Slit Jaw Imager Map.
12
13 The Interface Region Imaging Spectrograph (IRIS) small explorer spacecraft
14 provides simultaneous spectra and images of the photosphere, chromosphere,
15 transition region, and corona with 0.33 to 0.4 arcsec spatial resolution,
16 2-second temporal resolution and 1 km/s velocity resolution over a
17 field-of- view of up to 175 arcsec by 175 arcsec. IRIS consists of a 19-cm
18 UV telescope that feeds a slit-based dual-bandpass imaging spectrograph.
19
20 Slit-jaw images in four different passbands (C ii 1330, Si iv 1400,
21 Mg ii k 2796 and Mg ii wing 2830 A) can be taken simultaneously with
22 spectral rasters that sample regions up to 130 arcsec by 175 arcsec at a
23 variety of spatial samplings (from 0.33 arcsec and up).
24 IRIS is sensitive to emission from plasma at temperatures between
25 5000 K and 10 MK.
26
27 IRIS was launched into a Sun-synchronous orbit on 27 June 2013.
28
29 .. warning::
30
31 This object can only handle level 1 SJI files.
32
33 References
34 ----------
35 * `IRIS Mission Page <https://iris.lmsal.com>`_
36 * `IRIS Analysis Guide <https://iris.lmsal.com/itn26/itn26.pdf>`_
37 * `IRIS Instrument Paper <https://doi.org/10.1007/s11207-014-0485-y>`_
38 """
39 @property
40 def detector(self):
41 return "SJI"
42
43 @property
44 def spatial_units(self):
45 """
46 If not present in CUNIT{1,2} keywords, defaults to arcsec.
47 """
48 return SpatialPair(u.Unit(self.meta.get('cunit1', 'arcsec')),
49 u.Unit(self.meta.get('cunit2', 'arcsec')))
50
51 @property
52 def waveunit(self):
53 """
54 Taken from WAVEUNIT, or if not present defaults to Angstrom.
55 """
56 return u.Unit(header.get('waveunit', "Angstrom"))
57
58 @property
59 def wavelength(self):
60 """
61 Taken from WAVELNTH, or if not present TWAVE1.
62 """
63 return header.get('wavelnth', header.get('twave1')) * self.waveunit
64
65 @classmethod
66 def is_datasource_for(cls, data, header, **kwargs):
67 """Determines if header corresponds to an IRIS SJI image"""
68 tele = str(header.get('TELESCOP', '')).startswith('IRIS')
69 obs = str(header.get('INSTRUME', '')).startswith('SJI')
70 level = header.get('lvl_num') == 1
71 return tele and obs
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/map/sources/iris.py b/sunpy/map/sources/iris.py
--- a/sunpy/map/sources/iris.py
+++ b/sunpy/map/sources/iris.py
@@ -26,10 +26,6 @@
IRIS was launched into a Sun-synchronous orbit on 27 June 2013.
- .. warning::
-
- This object can only handle level 1 SJI files.
-
References
----------
* `IRIS Mission Page <https://iris.lmsal.com>`_
@@ -53,19 +49,27 @@
"""
Taken from WAVEUNIT, or if not present defaults to Angstrom.
"""
- return u.Unit(header.get('waveunit', "Angstrom"))
+ return u.Unit(self.meta.get('waveunit', "Angstrom"))
@property
def wavelength(self):
"""
Taken from WAVELNTH, or if not present TWAVE1.
"""
- return header.get('wavelnth', header.get('twave1')) * self.waveunit
+ return self.meta.get('wavelnth', self.meta.get('twave1')) * self.waveunit
+
+ @property
+ def unit(self):
+ unit_str = self.meta.get('bunit', None)
+ if unit_str is None:
+ return
+ # Remove "corrected" so that the unit can be parsed
+ unit_str = unit_str.lower().replace('corrected', '').strip()
+ return self._parse_fits_unit(unit_str)
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an IRIS SJI image"""
tele = str(header.get('TELESCOP', '')).startswith('IRIS')
obs = str(header.get('INSTRUME', '')).startswith('SJI')
- level = header.get('lvl_num') == 1
return tele and obs
|
{"golden_diff": "diff --git a/sunpy/map/sources/iris.py b/sunpy/map/sources/iris.py\n--- a/sunpy/map/sources/iris.py\n+++ b/sunpy/map/sources/iris.py\n@@ -26,10 +26,6 @@\n \n IRIS was launched into a Sun-synchronous orbit on 27 June 2013.\n \n- .. warning::\n-\n- This object can only handle level 1 SJI files.\n-\n References\n ----------\n * `IRIS Mission Page <https://iris.lmsal.com>`_\n@@ -53,19 +49,27 @@\n \"\"\"\n Taken from WAVEUNIT, or if not present defaults to Angstrom.\n \"\"\"\n- return u.Unit(header.get('waveunit', \"Angstrom\"))\n+ return u.Unit(self.meta.get('waveunit', \"Angstrom\"))\n \n @property\n def wavelength(self):\n \"\"\"\n Taken from WAVELNTH, or if not present TWAVE1.\n \"\"\"\n- return header.get('wavelnth', header.get('twave1')) * self.waveunit\n+ return self.meta.get('wavelnth', self.meta.get('twave1')) * self.waveunit\n+\n+ @property\n+ def unit(self):\n+ unit_str = self.meta.get('bunit', None)\n+ if unit_str is None:\n+ return\n+ # Remove \"corrected\" so that the unit can be parsed\n+ unit_str = unit_str.lower().replace('corrected', '').strip()\n+ return self._parse_fits_unit(unit_str)\n \n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an IRIS SJI image\"\"\"\n tele = str(header.get('TELESCOP', '')).startswith('IRIS')\n obs = str(header.get('INSTRUME', '')).startswith('SJI')\n- level = header.get('lvl_num') == 1\n return tele and obs\n", "issue": "IRIS SJI maps call undefined header in self.wavelength\n### Describe the bug\n\nWhen creating SJI maps by feeding data and header separately into a Map() and then doing a plot, this causes an error because it calls an undefined header.\n\n### To Reproduce\n\nimport glob\r\nfrom [astropy.io](http://astropy.io/) import fits\r\nfrom [sunpy.map](http://sunpy.map/) import Map\r\n\r\ndata_file = glob.glob('IRIS/*_SJI_2832_*fits')\r\ndata_file.sort()\r\n\r\nhdul = [fits.open](http://fits.open/)(data_file[0])\r\n \r\nheader = hdul[0].header\r\ndata = hdul[0].data\r\n\r\nMap(data[0], header).plot()\n\n### What happened?\n\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n/tmp/ipykernel_73554/1651218312.py in <module>\r\n 7 data = hdul[0].data\r\n 8 \r\n----> 9 Map(data[0], header).plot()\r\n\r\n~/SunEnvironment/lib64/python3.8/site-packages/astropy/units/decorators.py in wrapper(*func_args, **func_kwargs)\r\n 251 # Call the original function with any equivalencies in force.\r\n 252 with add_enabled_equivalencies(self.equivalencies):\r\n--> 253 return_ = wrapped_function(*func_args, **func_kwargs)\r\n 254 \r\n 255 valid_empty = (inspect.Signature.empty, None)\r\n\r\n~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/mapbase.py in plot(self, annotate, axes, title, autoalign, clip_interval, **imshow_kwargs)\r\n 2406 plot_settings_title = plot_settings.pop('title')\r\n 2407 else:\r\n-> 2408 plot_settings_title = self.latex_name\r\n 2409 \r\n 2410 # Anything left in plot_settings is given to imshow\r\n\r\n~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/mapbase.py in latex_name(self)\r\n 735 def latex_name(self):\r\n 736 \"\"\"LaTeX formatted description of the Map.\"\"\"\r\n--> 737 if isinstance(self.measurement, u.Quantity):\r\n 738 return self._base_name().format(measurement=self.measurement._repr_latex_())\r\n 739 else:\r\n\r\n~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/mapbase.py in measurement(self)\r\n 898 defaults to dimensionless units.\r\n 899 
\"\"\"\r\n--> 900 return self.wavelength\r\n 901 \r\n 902 @property\r\n\r\n~/SunEnvironment/lib64/python3.8/site-packages/sunpy/map/sources/iris.py in wavelength(self)\r\n 61 Taken from WAVELNTH, or if not present TWAVE1.\r\n 62 \"\"\"\r\n---> 63 return header.get('wavelnth', header.get('twave1')) * self.waveunit\r\n 64 \r\n 65 @classmethod\r\n\r\nNameError: name 'header' is not defined\r\n\n\n### Expected behavior\n\n_No response_\n\n### Screenshots\n\n\r\n\n\n### System Details\n\nsunpy.__version__ : 3.1.3\r\nastropy.__version__: 4.3.1\n\n### Installation method\n\npip\n", "before_files": [{"content": "\nimport astropy.units as u\n\nfrom sunpy.map.mapbase import GenericMap, SpatialPair\n\n__all__ = ['SJIMap']\n\n\nclass SJIMap(GenericMap):\n \"\"\"\n A 2D IRIS Slit Jaw Imager Map.\n\n The Interface Region Imaging Spectrograph (IRIS) small explorer spacecraft\n provides simultaneous spectra and images of the photosphere, chromosphere,\n transition region, and corona with 0.33 to 0.4 arcsec spatial resolution,\n 2-second temporal resolution and 1 km/s velocity resolution over a\n field-of- view of up to 175 arcsec by 175 arcsec. IRIS consists of a 19-cm\n UV telescope that feeds a slit-based dual-bandpass imaging spectrograph.\n\n Slit-jaw images in four different passbands (C ii 1330, Si iv 1400,\n Mg ii k 2796 and Mg ii wing 2830 A) can be taken simultaneously with\n spectral rasters that sample regions up to 130 arcsec by 175 arcsec at a\n variety of spatial samplings (from 0.33 arcsec and up).\n IRIS is sensitive to emission from plasma at temperatures between\n 5000 K and 10 MK.\n\n IRIS was launched into a Sun-synchronous orbit on 27 June 2013.\n\n .. warning::\n\n This object can only handle level 1 SJI files.\n\n References\n ----------\n * `IRIS Mission Page <https://iris.lmsal.com>`_\n * `IRIS Analysis Guide <https://iris.lmsal.com/itn26/itn26.pdf>`_\n * `IRIS Instrument Paper <https://doi.org/10.1007/s11207-014-0485-y>`_\n \"\"\"\n @property\n def detector(self):\n return \"SJI\"\n\n @property\n def spatial_units(self):\n \"\"\"\n If not present in CUNIT{1,2} keywords, defaults to arcsec.\n \"\"\"\n return SpatialPair(u.Unit(self.meta.get('cunit1', 'arcsec')),\n u.Unit(self.meta.get('cunit2', 'arcsec')))\n\n @property\n def waveunit(self):\n \"\"\"\n Taken from WAVEUNIT, or if not present defaults to Angstrom.\n \"\"\"\n return u.Unit(header.get('waveunit', \"Angstrom\"))\n\n @property\n def wavelength(self):\n \"\"\"\n Taken from WAVELNTH, or if not present TWAVE1.\n \"\"\"\n return header.get('wavelnth', header.get('twave1')) * self.waveunit\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an IRIS SJI image\"\"\"\n tele = str(header.get('TELESCOP', '')).startswith('IRIS')\n obs = str(header.get('INSTRUME', '')).startswith('SJI')\n level = header.get('lvl_num') == 1\n return tele and obs\n", "path": "sunpy/map/sources/iris.py"}], "after_files": [{"content": "\nimport astropy.units as u\n\nfrom sunpy.map.mapbase import GenericMap, SpatialPair\n\n__all__ = ['SJIMap']\n\n\nclass SJIMap(GenericMap):\n \"\"\"\n A 2D IRIS Slit Jaw Imager Map.\n\n The Interface Region Imaging Spectrograph (IRIS) small explorer spacecraft\n provides simultaneous spectra and images of the photosphere, chromosphere,\n transition region, and corona with 0.33 to 0.4 arcsec spatial resolution,\n 2-second temporal resolution and 1 km/s velocity resolution over a\n field-of- view of up to 175 arcsec by 175 arcsec. 
IRIS consists of a 19-cm\n UV telescope that feeds a slit-based dual-bandpass imaging spectrograph.\n\n Slit-jaw images in four different passbands (C ii 1330, Si iv 1400,\n Mg ii k 2796 and Mg ii wing 2830 A) can be taken simultaneously with\n spectral rasters that sample regions up to 130 arcsec by 175 arcsec at a\n variety of spatial samplings (from 0.33 arcsec and up).\n IRIS is sensitive to emission from plasma at temperatures between\n 5000 K and 10 MK.\n\n IRIS was launched into a Sun-synchronous orbit on 27 June 2013.\n\n References\n ----------\n * `IRIS Mission Page <https://iris.lmsal.com>`_\n * `IRIS Analysis Guide <https://iris.lmsal.com/itn26/itn26.pdf>`_\n * `IRIS Instrument Paper <https://doi.org/10.1007/s11207-014-0485-y>`_\n \"\"\"\n @property\n def detector(self):\n return \"SJI\"\n\n @property\n def spatial_units(self):\n \"\"\"\n If not present in CUNIT{1,2} keywords, defaults to arcsec.\n \"\"\"\n return SpatialPair(u.Unit(self.meta.get('cunit1', 'arcsec')),\n u.Unit(self.meta.get('cunit2', 'arcsec')))\n\n @property\n def waveunit(self):\n \"\"\"\n Taken from WAVEUNIT, or if not present defaults to Angstrom.\n \"\"\"\n return u.Unit(self.meta.get('waveunit', \"Angstrom\"))\n\n @property\n def wavelength(self):\n \"\"\"\n Taken from WAVELNTH, or if not present TWAVE1.\n \"\"\"\n return self.meta.get('wavelnth', self.meta.get('twave1')) * self.waveunit\n\n @property\n def unit(self):\n unit_str = self.meta.get('bunit', None)\n if unit_str is None:\n return\n # Remove \"corrected\" so that the unit can be parsed\n unit_str = unit_str.lower().replace('corrected', '').strip()\n return self._parse_fits_unit(unit_str)\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an IRIS SJI image\"\"\"\n tele = str(header.get('TELESCOP', '')).startswith('IRIS')\n obs = str(header.get('INSTRUME', '')).startswith('SJI')\n return tele and obs\n", "path": "sunpy/map/sources/iris.py"}]}
| 1,923 | 442 |
gh_patches_debug_20993
|
rasdani/github-patches
|
git_diff
|
dask__distributed-779
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
distributed-1.15.0rc1 seems wrongly requiring "futures" from a Python-3.6 installation
Collecting futures (from distributed>=1.14; extra == "complete"->dask[complete]->-r C:\Winpython\basedir36
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 from setuptools import setup
5 import sys
6 import versioneer
7
8 requires = open('requirements.txt').read().strip().split('\n')
9
10 setup(name='distributed',
11 version=versioneer.get_version(),
12 cmdclass=versioneer.get_cmdclass(),
13 description='Distributed computing',
14 url='https://distributed.readthedocs.io/en/latest/',
15 maintainer='Matthew Rocklin',
16 maintainer_email='[email protected]',
17 license='BSD',
18 package_data={ '': ['templates/index.html'], },
19 include_package_data=True,
20 install_requires=requires,
21 packages=['distributed',
22 'distributed.bokeh',
23 'distributed.bokeh.background',
24 'distributed.bokeh.status',
25 'distributed.bokeh.tasks',
26 'distributed.bokeh.workers',
27 'distributed.cli',
28 'distributed.deploy',
29 'distributed.diagnostics',
30 'distributed.protocol',
31 'distributed.http'],
32 long_description=(open('README.md').read() if os.path.exists('README.md')
33 else ''),
34 entry_points='''
35 [console_scripts]
36 dask-ssh=distributed.cli.dask_ssh:go
37 dask-submit=distributed.cli.dask_submit:go
38 dask-remote=distributed.cli.dask_remote:go
39 dask-scheduler=distributed.cli.dask_scheduler:go
40 dask-worker=distributed.cli.dask_worker:go
41 ''',
42 zip_safe=False)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,18 @@
import versioneer
requires = open('requirements.txt').read().strip().split('\n')
+install_requires = []
+extras_require = {}
+for r in requires:
+ if ';' in r:
+ # requirements.txt conditional dependencies need to be reformatted for wheels
+ # to the form: `'[extra_name]:condition' : ['requirements']`
+ req, cond = r.split(';', 1)
+ cond = ':' + cond
+ cond_reqs = extras_require.setdefault(cond, [])
+ cond_reqs.append(req)
+ else:
+ install_requires.append(r)
setup(name='distributed',
version=versioneer.get_version(),
@@ -17,7 +29,8 @@
license='BSD',
package_data={ '': ['templates/index.html'], },
include_package_data=True,
- install_requires=requires,
+ install_requires=install_requires,
+ extras_require=extras_require,
packages=['distributed',
'distributed.bokeh',
'distributed.bokeh.background',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,6 +6,18 @@\n import versioneer\n \n requires = open('requirements.txt').read().strip().split('\\n')\n+install_requires = []\n+extras_require = {}\n+for r in requires:\n+ if ';' in r:\n+ # requirements.txt conditional dependencies need to be reformatted for wheels\n+ # to the form: `'[extra_name]:condition' : ['requirements']`\n+ req, cond = r.split(';', 1)\n+ cond = ':' + cond\n+ cond_reqs = extras_require.setdefault(cond, [])\n+ cond_reqs.append(req)\n+ else:\n+ install_requires.append(r)\n \n setup(name='distributed',\n version=versioneer.get_version(),\n@@ -17,7 +29,8 @@\n license='BSD',\n package_data={ '': ['templates/index.html'], },\n include_package_data=True,\n- install_requires=requires,\n+ install_requires=install_requires,\n+ extras_require=extras_require,\n packages=['distributed',\n 'distributed.bokeh',\n 'distributed.bokeh.background',\n", "issue": "distributed-1.15.0rc1 seems wrongly requiring \"futures\" from a Python-3.6 installation\nCollecting futures (from distributed>=1.14; extra == \"complete\"->dask[complete]->-r C:\\Winpython\\basedir36\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\nimport versioneer\n\nrequires = open('requirements.txt').read().strip().split('\\n')\n\nsetup(name='distributed',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description='Distributed computing',\n url='https://distributed.readthedocs.io/en/latest/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n package_data={ '': ['templates/index.html'], },\n include_package_data=True,\n install_requires=requires,\n packages=['distributed',\n 'distributed.bokeh',\n 'distributed.bokeh.background',\n 'distributed.bokeh.status',\n 'distributed.bokeh.tasks',\n 'distributed.bokeh.workers',\n 'distributed.cli',\n 'distributed.deploy',\n 'distributed.diagnostics',\n 'distributed.protocol',\n 'distributed.http'],\n long_description=(open('README.md').read() if os.path.exists('README.md')\n else ''),\n entry_points='''\n [console_scripts]\n dask-ssh=distributed.cli.dask_ssh:go\n dask-submit=distributed.cli.dask_submit:go\n dask-remote=distributed.cli.dask_remote:go\n dask-scheduler=distributed.cli.dask_scheduler:go\n dask-worker=distributed.cli.dask_worker:go\n ''',\n zip_safe=False)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\nimport versioneer\n\nrequires = open('requirements.txt').read().strip().split('\\n')\ninstall_requires = []\nextras_require = {}\nfor r in requires:\n if ';' in r:\n # requirements.txt conditional dependencies need to be reformatted for wheels\n # to the form: `'[extra_name]:condition' : ['requirements']`\n req, cond = r.split(';', 1)\n cond = ':' + cond\n cond_reqs = extras_require.setdefault(cond, [])\n cond_reqs.append(req)\n else:\n install_requires.append(r)\n\nsetup(name='distributed',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description='Distributed computing',\n url='https://distributed.readthedocs.io/en/latest/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n package_data={ '': ['templates/index.html'], },\n include_package_data=True,\n install_requires=install_requires,\n extras_require=extras_require,\n packages=['distributed',\n 'distributed.bokeh',\n 'distributed.bokeh.background',\n 
'distributed.bokeh.status',\n 'distributed.bokeh.tasks',\n 'distributed.bokeh.workers',\n 'distributed.cli',\n 'distributed.deploy',\n 'distributed.diagnostics',\n 'distributed.protocol',\n 'distributed.http'],\n long_description=(open('README.md').read() if os.path.exists('README.md')\n else ''),\n entry_points='''\n [console_scripts]\n dask-ssh=distributed.cli.dask_ssh:go\n dask-submit=distributed.cli.dask_submit:go\n dask-remote=distributed.cli.dask_remote:go\n dask-scheduler=distributed.cli.dask_scheduler:go\n dask-worker=distributed.cli.dask_worker:go\n ''',\n zip_safe=False)\n", "path": "setup.py"}]}
| 699 | 249 |
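The golden diff above moves requirement strings that carry an environment marker (a `;` clause) into setuptools-style conditional extras, so wheels stop force-installing backports such as `futures` on Python 3. A minimal, self-contained sketch of that transformation; the requirement strings and marker here are hypothetical examples, not taken from distributed's requirements.txt:

```python
# Split requirement strings with environment markers into plain
# install_requires plus wheel-friendly conditional extras.
requires = [
    "tornado >= 4.2",
    "futures; python_version < '3.0'",
]

install_requires = []
extras_require = {}
for r in requires:
    if ';' in r:
        # "pkg; marker" becomes {':marker': ['pkg']}, the form wheels expect.
        req, cond = r.split(';', 1)
        extras_require.setdefault(':' + cond.strip(), []).append(req.strip())
    else:
        install_requires.append(r)

print(install_requires)  # ['tornado >= 4.2']
print(extras_require)    # {":python_version < '3.0'": ['futures']}
```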
gh_patches_debug_15546
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-605
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
variable initialization in local_tf_runner is slow and incorrect
The variable initialization code in function `initialize_tf_vars` of `garage/experiment/local_tf_runner.py` is written in a way that's both very inefficient and potentially incorrect. In particular, the list comprehension
````[
v for v in tf.global_variables()
if v.name.split(':')[0] in str(
self.sess.run(tf.report_uninitialized_variables()))
]
````
reruns the `tf.report_uninitialized_variables()` op every time the condition needs to be evaluated, which is very slow for larger graphs. It takes me several minutes to run for a convnet with 128x128x4 pixel inputs. Storing the result of this operation reduces runtime to mere seconds.
Further, the use of string operations can potentially lead to re-initializing some variables. If the name of some initialised variable A is a substring of an uninitialised variable B, the above list comprehension will include variable A in the result. A better alternative is to decode the bytestrings returned by `sess.run(tf.report_uninitialized_variables()` and construct a set.
The function code then becomes
````uninit_set = set(
e.decode() for e in self.sess.run(tf.report_uninitialized_variables())
)
self.sess.run(
tf.variables_initializer([
v for v in tf.global_variables()
if v.name.split(':')[0] in uninit_set
]))
````
If this idea seems reasonable, I am happy to make a pull request.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `garage/experiment/local_tf_runner.py`
Content:
```
1 """
2 The local runner for tensorflow algorithms.
3
4 A runner setup context for algorithms during initialization and
5 pipelines data between sampler and algorithm during training.
6 """
7 import time
8
9 import tensorflow as tf
10
11 from garage.logger import logger
12 from garage.logger import snapshotter
13 from garage.logger import tabular
14
15 # Note: Optional module should be imported ad hoc to break circular dependency.
16
17
18 class LocalRunner:
19 """This class implements a local runner for tensorflow algorithms.
20
21 A local runner provides a default tensorflow session using python context.
22 This is useful for those experiment components (e.g. policy) that require a
23 tensorflow session during construction.
24
25 Use Runner.setup(algo, env) to setup algorithm and environement for runner
26 and Runner.train() to start training.
27
28 Examples:
29 with LocalRunner() as runner:
30 env = gym.make('CartPole-v1')
31 policy = CategoricalMLPPolicy(
32 env_spec=env.spec,
33 hidden_sizes=(32, 32))
34 algo = TRPO(
35 env=env,
36 policy=policy,
37 baseline=baseline,
38 max_path_length=100,
39 discount=0.99,
40 max_kl_step=0.01)
41 runner.setup(algo, env)
42 runner.train(n_epochs=100, batch_size=4000)
43
44 """
45
46 def __init__(self, sess=None, max_cpus=1):
47 """Create a new local runner.
48
49 Args:
50 max_cpus: The maximum number of parallel sampler workers.
51 sess: An optional tensorflow session.
52 A new session will be created immediately if not provided.
53
54 Note:
55 The local runner will set up a joblib task pool of size max_cpus
56 possibly later used by BatchSampler. If BatchSampler is not used,
57 the processes in the pool will remain dormant.
58
59 This setup is required to use tensorflow in a multiprocess
60 environment before a tensorflow session is created
61 because tensorflow is not fork-safe.
62
63 See https://github.com/tensorflow/tensorflow/issues/2448.
64
65 """
66 if max_cpus > 1:
67 from garage.sampler import singleton_pool
68 singleton_pool.initialize(max_cpus)
69 self.sess = sess or tf.Session()
70 self.has_setup = False
71 self.plot = False
72
73 def __enter__(self):
74 """Set self.sess as the default session.
75
76 Returns:
77 This local runner.
78
79 """
80 if tf.get_default_session() is not self.sess:
81 self.sess.__enter__()
82 return self
83
84 def __exit__(self, exc_type, exc_val, exc_tb):
85 """Leave session."""
86 if tf.get_default_session() is self.sess:
87 self.sess.__exit__(exc_type, exc_val, exc_tb)
88
89 def setup(self, algo, env, sampler_cls=None, sampler_args=None):
90 """Set up runner for algorithm and environment.
91
92 This method saves algo and env within runner and creates a sampler.
93
94 Note:
95 After setup() is called all variables in session should have been
96 initialized. setup() respects existing values in session so
97 policy weights can be loaded before setup().
98
99 Args:
100 algo: An algorithm instance.
101 env: An environement instance.
102 sampler_cls: A sampler class.
103 sampler_args: Arguments to be passed to sampler constructor.
104
105 """
106 self.algo = algo
107 self.env = env
108 self.policy = self.algo.policy
109
110 if sampler_args is None:
111 sampler_args = {}
112
113 if sampler_cls is None:
114 from garage.tf.algos.batch_polopt import BatchPolopt
115 if isinstance(algo, BatchPolopt):
116 if self.policy.vectorized:
117 from garage.tf.samplers import OnPolicyVectorizedSampler
118 sampler_cls = OnPolicyVectorizedSampler
119 else:
120 from garage.tf.samplers import BatchSampler
121 sampler_cls = BatchSampler
122 else:
123 from garage.tf.samplers import OffPolicyVectorizedSampler
124 sampler_cls = OffPolicyVectorizedSampler
125
126 self.sampler = sampler_cls(algo, env, **sampler_args)
127
128 self.initialize_tf_vars()
129 logger.log(self.sess.graph)
130 self.has_setup = True
131
132 def initialize_tf_vars(self):
133 """Initialize all uninitialized variables in session."""
134 with tf.name_scope("initialize_tf_vars"):
135 self.sess.run(
136 tf.variables_initializer([
137 v for v in tf.global_variables()
138 if v.name.split(':')[0] in str(
139 self.sess.run(tf.report_uninitialized_variables()))
140 ]))
141
142 def start_worker(self):
143 """Start Plotter and Sampler workers."""
144 self.sampler.start_worker()
145 if self.plot:
146 from garage.tf.plotter import Plotter
147 self.plotter = Plotter(self.env, self.policy)
148 self.plotter.start()
149
150 def shutdown_worker(self):
151 """Shutdown Plotter and Sampler workers."""
152 self.sampler.shutdown_worker()
153 if self.plot:
154 self.plotter.close()
155
156 def obtain_samples(self, itr, batch_size):
157 """Obtain one batch of samples.
158
159 Args:
160 itr: Index of iteration (epoch).
161 batch_size: Number of steps in batch.
162 This is a hint that the sampler may or may not respect.
163
164 Returns:
165 One batch of samples.
166
167 """
168 if self.n_epoch_cycles == 1:
169 logger.log('Obtaining samples...')
170 return self.sampler.obtain_samples(itr, batch_size)
171
172 def save_snapshot(self, itr, paths=None):
173 """Save snapshot of current batch.
174
175 Args:
176 itr: Index of iteration (epoch).
177 paths: Batch of samples after preprocessed.
178
179 """
180 assert self.has_setup
181
182 logger.log("Saving snapshot...")
183 params = self.algo.get_itr_snapshot(itr)
184 params['env'] = self.env
185 if paths:
186 params['paths'] = paths
187 snapshotter.save_itr_params(itr, params)
188 logger.log('Saved')
189
190 def log_diagnostics(self, pause_for_plot=False):
191 """Log diagnostics.
192
193 Args:
194 pause_for_plot: Pause for plot.
195
196 """
197 logger.log('Time %.2f s' % (time.time() - self.start_time))
198 logger.log('EpochTime %.2f s' % (time.time() - self.itr_start_time))
199 logger.log(tabular)
200 if self.plot:
201 self.plotter.update_plot(self.policy, self.algo.max_path_length)
202 if pause_for_plot:
203 input('Plotting evaluation run: Press Enter to " "continue...')
204
205 def train(self,
206 n_epochs,
207 n_epoch_cycles=1,
208 batch_size=None,
209 plot=False,
210 store_paths=False,
211 pause_for_plot=False):
212 """Start training.
213
214 Args:
215 n_epochs: Number of epochs.
216 n_epoch_cycles: Number of batches of samples in each epoch.
217 This is only useful for off-policy algorithm.
218 For on-policy algorithm this value should always be 1.
219 batch_size: Number of steps in batch.
220 plot: Visualize policy by doing rollout after each epoch.
221 store_paths: Save paths in snapshot.
222 pause_for_plot: Pause for plot.
223
224 Returns:
225 The average return in last epoch cycle.
226
227 """
228 assert self.has_setup, ('Use Runner.setup() to setup runner before '
229 'training.')
230 if batch_size is None:
231 from garage.tf.samplers import OffPolicyVectorizedSampler
232 if isinstance(self.sampler, OffPolicyVectorizedSampler):
233 batch_size = self.algo.max_path_length
234 else:
235 batch_size = 40 * self.algo.max_path_length
236
237 self.n_epoch_cycles = n_epoch_cycles
238
239 self.plot = plot
240 self.start_worker()
241 self.start_time = time.time()
242
243 itr = 0
244 last_return = None
245 for epoch in range(n_epochs):
246 self.itr_start_time = time.time()
247 paths = None
248 with logger.prefix('epoch #%d | ' % epoch):
249 for cycle in range(n_epoch_cycles):
250 paths = self.obtain_samples(itr, batch_size)
251 paths = self.sampler.process_samples(itr, paths)
252 last_return = self.algo.train_once(itr, paths)
253 itr += 1
254 self.save_snapshot(epoch, paths if store_paths else None)
255 self.log_diagnostics(pause_for_plot)
256 logger.dump_all(itr)
257 tabular.clear()
258
259 self.shutdown_worker()
260 return last_return
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/garage/experiment/local_tf_runner.py b/garage/experiment/local_tf_runner.py
--- a/garage/experiment/local_tf_runner.py
+++ b/garage/experiment/local_tf_runner.py
@@ -132,11 +132,14 @@
def initialize_tf_vars(self):
"""Initialize all uninitialized variables in session."""
with tf.name_scope("initialize_tf_vars"):
+ uninited_set = [
+ e.decode()
+ for e in self.sess.run(tf.report_uninitialized_variables())
+ ]
self.sess.run(
tf.variables_initializer([
v for v in tf.global_variables()
- if v.name.split(':')[0] in str(
- self.sess.run(tf.report_uninitialized_variables()))
+ if v.name.split(':')[0] in uninited_set
]))
def start_worker(self):
|
{"golden_diff": "diff --git a/garage/experiment/local_tf_runner.py b/garage/experiment/local_tf_runner.py\n--- a/garage/experiment/local_tf_runner.py\n+++ b/garage/experiment/local_tf_runner.py\n@@ -132,11 +132,14 @@\n def initialize_tf_vars(self):\n \"\"\"Initialize all uninitialized variables in session.\"\"\"\n with tf.name_scope(\"initialize_tf_vars\"):\n+ uninited_set = [\n+ e.decode()\n+ for e in self.sess.run(tf.report_uninitialized_variables())\n+ ]\n self.sess.run(\n tf.variables_initializer([\n v for v in tf.global_variables()\n- if v.name.split(':')[0] in str(\n- self.sess.run(tf.report_uninitialized_variables()))\n+ if v.name.split(':')[0] in uninited_set\n ]))\n \n def start_worker(self):\n", "issue": "variable initialization in local_tf_runner is slow and incorrect\nThe variable initialization code in function `initialize_tf_vars` of `garage/experiment/local_tf_runner.py` is written in a way that's both very inefficient and potentially incorrect. In particular, the list comprehension\r\n````[\r\n v for v in tf.global_variables()\r\n if v.name.split(':')[0] in str(\r\n self.sess.run(tf.report_uninitialized_variables()))\r\n]\r\n````\r\nreruns the `tf.report_uninitialized_variables()` op every time the condition needs to be evaluated, which is very slow for larger graphs. It takes me several minutes to run for a convnet with 128x128x4 pixel inputs. Storing the result of this operation reduces runtime to mere seconds.\r\n\r\nFurther, the use of string operations can potentially lead to re-initializing some variables. If the name of some initialised variable A is a substring of an uninitialised variable B, the above list comprehension will include variable A in the result. A better alternative is to decode the bytestrings returned by `sess.run(tf.report_uninitialized_variables()` and construct a set.\r\n\r\nThe function code then becomes\r\n\r\n````uninit_set = set(\r\n e.decode() for e in self.sess.run(tf.report_uninitialized_variables())\r\n )\r\n self.sess.run(\r\n tf.variables_initializer([\r\n v for v in tf.global_variables()\r\n if v.name.split(':')[0] in uninit_set\r\n ]))\r\n````\r\n\r\nIf this idea seems reasonable, I am happy to make a pull request.\n", "before_files": [{"content": "\"\"\"\nThe local runner for tensorflow algorithms.\n\nA runner setup context for algorithms during initialization and\npipelines data between sampler and algorithm during training.\n\"\"\"\nimport time\n\nimport tensorflow as tf\n\nfrom garage.logger import logger\nfrom garage.logger import snapshotter\nfrom garage.logger import tabular\n\n# Note: Optional module should be imported ad hoc to break circular dependency.\n\n\nclass LocalRunner:\n \"\"\"This class implements a local runner for tensorflow algorithms.\n\n A local runner provides a default tensorflow session using python context.\n This is useful for those experiment components (e.g. 
policy) that require a\n tensorflow session during construction.\n\n Use Runner.setup(algo, env) to setup algorithm and environement for runner\n and Runner.train() to start training.\n\n Examples:\n with LocalRunner() as runner:\n env = gym.make('CartPole-v1')\n policy = CategoricalMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32))\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n max_path_length=100,\n discount=0.99,\n max_kl_step=0.01)\n runner.setup(algo, env)\n runner.train(n_epochs=100, batch_size=4000)\n\n \"\"\"\n\n def __init__(self, sess=None, max_cpus=1):\n \"\"\"Create a new local runner.\n\n Args:\n max_cpus: The maximum number of parallel sampler workers.\n sess: An optional tensorflow session.\n A new session will be created immediately if not provided.\n\n Note:\n The local runner will set up a joblib task pool of size max_cpus\n possibly later used by BatchSampler. If BatchSampler is not used,\n the processes in the pool will remain dormant.\n\n This setup is required to use tensorflow in a multiprocess\n environment before a tensorflow session is created\n because tensorflow is not fork-safe.\n\n See https://github.com/tensorflow/tensorflow/issues/2448.\n\n \"\"\"\n if max_cpus > 1:\n from garage.sampler import singleton_pool\n singleton_pool.initialize(max_cpus)\n self.sess = sess or tf.Session()\n self.has_setup = False\n self.plot = False\n\n def __enter__(self):\n \"\"\"Set self.sess as the default session.\n\n Returns:\n This local runner.\n\n \"\"\"\n if tf.get_default_session() is not self.sess:\n self.sess.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"Leave session.\"\"\"\n if tf.get_default_session() is self.sess:\n self.sess.__exit__(exc_type, exc_val, exc_tb)\n\n def setup(self, algo, env, sampler_cls=None, sampler_args=None):\n \"\"\"Set up runner for algorithm and environment.\n\n This method saves algo and env within runner and creates a sampler.\n\n Note:\n After setup() is called all variables in session should have been\n initialized. 
setup() respects existing values in session so\n policy weights can be loaded before setup().\n\n Args:\n algo: An algorithm instance.\n env: An environement instance.\n sampler_cls: A sampler class.\n sampler_args: Arguments to be passed to sampler constructor.\n\n \"\"\"\n self.algo = algo\n self.env = env\n self.policy = self.algo.policy\n\n if sampler_args is None:\n sampler_args = {}\n\n if sampler_cls is None:\n from garage.tf.algos.batch_polopt import BatchPolopt\n if isinstance(algo, BatchPolopt):\n if self.policy.vectorized:\n from garage.tf.samplers import OnPolicyVectorizedSampler\n sampler_cls = OnPolicyVectorizedSampler\n else:\n from garage.tf.samplers import BatchSampler\n sampler_cls = BatchSampler\n else:\n from garage.tf.samplers import OffPolicyVectorizedSampler\n sampler_cls = OffPolicyVectorizedSampler\n\n self.sampler = sampler_cls(algo, env, **sampler_args)\n\n self.initialize_tf_vars()\n logger.log(self.sess.graph)\n self.has_setup = True\n\n def initialize_tf_vars(self):\n \"\"\"Initialize all uninitialized variables in session.\"\"\"\n with tf.name_scope(\"initialize_tf_vars\"):\n self.sess.run(\n tf.variables_initializer([\n v for v in tf.global_variables()\n if v.name.split(':')[0] in str(\n self.sess.run(tf.report_uninitialized_variables()))\n ]))\n\n def start_worker(self):\n \"\"\"Start Plotter and Sampler workers.\"\"\"\n self.sampler.start_worker()\n if self.plot:\n from garage.tf.plotter import Plotter\n self.plotter = Plotter(self.env, self.policy)\n self.plotter.start()\n\n def shutdown_worker(self):\n \"\"\"Shutdown Plotter and Sampler workers.\"\"\"\n self.sampler.shutdown_worker()\n if self.plot:\n self.plotter.close()\n\n def obtain_samples(self, itr, batch_size):\n \"\"\"Obtain one batch of samples.\n\n Args:\n itr: Index of iteration (epoch).\n batch_size: Number of steps in batch.\n This is a hint that the sampler may or may not respect.\n\n Returns:\n One batch of samples.\n\n \"\"\"\n if self.n_epoch_cycles == 1:\n logger.log('Obtaining samples...')\n return self.sampler.obtain_samples(itr, batch_size)\n\n def save_snapshot(self, itr, paths=None):\n \"\"\"Save snapshot of current batch.\n\n Args:\n itr: Index of iteration (epoch).\n paths: Batch of samples after preprocessed.\n\n \"\"\"\n assert self.has_setup\n\n logger.log(\"Saving snapshot...\")\n params = self.algo.get_itr_snapshot(itr)\n params['env'] = self.env\n if paths:\n params['paths'] = paths\n snapshotter.save_itr_params(itr, params)\n logger.log('Saved')\n\n def log_diagnostics(self, pause_for_plot=False):\n \"\"\"Log diagnostics.\n\n Args:\n pause_for_plot: Pause for plot.\n\n \"\"\"\n logger.log('Time %.2f s' % (time.time() - self.start_time))\n logger.log('EpochTime %.2f s' % (time.time() - self.itr_start_time))\n logger.log(tabular)\n if self.plot:\n self.plotter.update_plot(self.policy, self.algo.max_path_length)\n if pause_for_plot:\n input('Plotting evaluation run: Press Enter to \" \"continue...')\n\n def train(self,\n n_epochs,\n n_epoch_cycles=1,\n batch_size=None,\n plot=False,\n store_paths=False,\n pause_for_plot=False):\n \"\"\"Start training.\n\n Args:\n n_epochs: Number of epochs.\n n_epoch_cycles: Number of batches of samples in each epoch.\n This is only useful for off-policy algorithm.\n For on-policy algorithm this value should always be 1.\n batch_size: Number of steps in batch.\n plot: Visualize policy by doing rollout after each epoch.\n store_paths: Save paths in snapshot.\n pause_for_plot: Pause for plot.\n\n Returns:\n The average return in last epoch 
cycle.\n\n \"\"\"\n assert self.has_setup, ('Use Runner.setup() to setup runner before '\n 'training.')\n if batch_size is None:\n from garage.tf.samplers import OffPolicyVectorizedSampler\n if isinstance(self.sampler, OffPolicyVectorizedSampler):\n batch_size = self.algo.max_path_length\n else:\n batch_size = 40 * self.algo.max_path_length\n\n self.n_epoch_cycles = n_epoch_cycles\n\n self.plot = plot\n self.start_worker()\n self.start_time = time.time()\n\n itr = 0\n last_return = None\n for epoch in range(n_epochs):\n self.itr_start_time = time.time()\n paths = None\n with logger.prefix('epoch #%d | ' % epoch):\n for cycle in range(n_epoch_cycles):\n paths = self.obtain_samples(itr, batch_size)\n paths = self.sampler.process_samples(itr, paths)\n last_return = self.algo.train_once(itr, paths)\n itr += 1\n self.save_snapshot(epoch, paths if store_paths else None)\n self.log_diagnostics(pause_for_plot)\n logger.dump_all(itr)\n tabular.clear()\n\n self.shutdown_worker()\n return last_return\n", "path": "garage/experiment/local_tf_runner.py"}], "after_files": [{"content": "\"\"\"\nThe local runner for tensorflow algorithms.\n\nA runner setup context for algorithms during initialization and\npipelines data between sampler and algorithm during training.\n\"\"\"\nimport time\n\nimport tensorflow as tf\n\nfrom garage.logger import logger\nfrom garage.logger import snapshotter\nfrom garage.logger import tabular\n\n# Note: Optional module should be imported ad hoc to break circular dependency.\n\n\nclass LocalRunner:\n \"\"\"This class implements a local runner for tensorflow algorithms.\n\n A local runner provides a default tensorflow session using python context.\n This is useful for those experiment components (e.g. policy) that require a\n tensorflow session during construction.\n\n Use Runner.setup(algo, env) to setup algorithm and environement for runner\n and Runner.train() to start training.\n\n Examples:\n with LocalRunner() as runner:\n env = gym.make('CartPole-v1')\n policy = CategoricalMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32))\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n max_path_length=100,\n discount=0.99,\n max_kl_step=0.01)\n runner.setup(algo, env)\n runner.train(n_epochs=100, batch_size=4000)\n\n \"\"\"\n\n def __init__(self, sess=None, max_cpus=1):\n \"\"\"Create a new local runner.\n\n Args:\n max_cpus: The maximum number of parallel sampler workers.\n sess: An optional tensorflow session.\n A new session will be created immediately if not provided.\n\n Note:\n The local runner will set up a joblib task pool of size max_cpus\n possibly later used by BatchSampler. 
If BatchSampler is not used,\n the processes in the pool will remain dormant.\n\n This setup is required to use tensorflow in a multiprocess\n environment before a tensorflow session is created\n because tensorflow is not fork-safe.\n\n See https://github.com/tensorflow/tensorflow/issues/2448.\n\n \"\"\"\n if max_cpus > 1:\n from garage.sampler import singleton_pool\n singleton_pool.initialize(max_cpus)\n self.sess = sess or tf.Session()\n self.has_setup = False\n self.plot = False\n\n def __enter__(self):\n \"\"\"Set self.sess as the default session.\n\n Returns:\n This local runner.\n\n \"\"\"\n if tf.get_default_session() is not self.sess:\n self.sess.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"Leave session.\"\"\"\n if tf.get_default_session() is self.sess:\n self.sess.__exit__(exc_type, exc_val, exc_tb)\n\n def setup(self, algo, env, sampler_cls=None, sampler_args=None):\n \"\"\"Set up runner for algorithm and environment.\n\n This method saves algo and env within runner and creates a sampler.\n\n Note:\n After setup() is called all variables in session should have been\n initialized. setup() respects existing values in session so\n policy weights can be loaded before setup().\n\n Args:\n algo: An algorithm instance.\n env: An environement instance.\n sampler_cls: A sampler class.\n sampler_args: Arguments to be passed to sampler constructor.\n\n \"\"\"\n self.algo = algo\n self.env = env\n self.policy = self.algo.policy\n\n if sampler_args is None:\n sampler_args = {}\n\n if sampler_cls is None:\n from garage.tf.algos.batch_polopt import BatchPolopt\n if isinstance(algo, BatchPolopt):\n if self.policy.vectorized:\n from garage.tf.samplers import OnPolicyVectorizedSampler\n sampler_cls = OnPolicyVectorizedSampler\n else:\n from garage.tf.samplers import BatchSampler\n sampler_cls = BatchSampler\n else:\n from garage.tf.samplers import OffPolicyVectorizedSampler\n sampler_cls = OffPolicyVectorizedSampler\n\n self.sampler = sampler_cls(algo, env, **sampler_args)\n\n self.initialize_tf_vars()\n logger.log(self.sess.graph)\n self.has_setup = True\n\n def initialize_tf_vars(self):\n \"\"\"Initialize all uninitialized variables in session.\"\"\"\n with tf.name_scope(\"initialize_tf_vars\"):\n uninited_set = [\n e.decode()\n for e in self.sess.run(tf.report_uninitialized_variables())\n ]\n self.sess.run(\n tf.variables_initializer([\n v for v in tf.global_variables()\n if v.name.split(':')[0] in uninited_set\n ]))\n\n def start_worker(self):\n \"\"\"Start Plotter and Sampler workers.\"\"\"\n self.sampler.start_worker()\n if self.plot:\n from garage.tf.plotter import Plotter\n self.plotter = Plotter(self.env, self.policy)\n self.plotter.start()\n\n def shutdown_worker(self):\n \"\"\"Shutdown Plotter and Sampler workers.\"\"\"\n self.sampler.shutdown_worker()\n if self.plot:\n self.plotter.close()\n\n def obtain_samples(self, itr, batch_size):\n \"\"\"Obtain one batch of samples.\n\n Args:\n itr: Index of iteration (epoch).\n batch_size: Number of steps in batch.\n This is a hint that the sampler may or may not respect.\n\n Returns:\n One batch of samples.\n\n \"\"\"\n if self.n_epoch_cycles == 1:\n logger.log('Obtaining samples...')\n return self.sampler.obtain_samples(itr, batch_size)\n\n def save_snapshot(self, itr, paths=None):\n \"\"\"Save snapshot of current batch.\n\n Args:\n itr: Index of iteration (epoch).\n paths: Batch of samples after preprocessed.\n\n \"\"\"\n assert self.has_setup\n\n logger.log(\"Saving snapshot...\")\n params = 
self.algo.get_itr_snapshot(itr)\n params['env'] = self.env\n if paths:\n params['paths'] = paths\n snapshotter.save_itr_params(itr, params)\n logger.log('Saved')\n\n def log_diagnostics(self, pause_for_plot=False):\n \"\"\"Log diagnostics.\n\n Args:\n pause_for_plot: Pause for plot.\n\n \"\"\"\n logger.log('Time %.2f s' % (time.time() - self.start_time))\n logger.log('EpochTime %.2f s' % (time.time() - self.itr_start_time))\n logger.log(tabular)\n if self.plot:\n self.plotter.update_plot(self.policy, self.algo.max_path_length)\n if pause_for_plot:\n input('Plotting evaluation run: Press Enter to \" \"continue...')\n\n def train(self,\n n_epochs,\n n_epoch_cycles=1,\n batch_size=None,\n plot=False,\n store_paths=False,\n pause_for_plot=False):\n \"\"\"Start training.\n\n Args:\n n_epochs: Number of epochs.\n n_epoch_cycles: Number of batches of samples in each epoch.\n This is only useful for off-policy algorithm.\n For on-policy algorithm this value should always be 1.\n batch_size: Number of steps in batch.\n plot: Visualize policy by doing rollout after each epoch.\n store_paths: Save paths in snapshot.\n pause_for_plot: Pause for plot.\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n assert self.has_setup, ('Use Runner.setup() to setup runner before '\n 'training.')\n if batch_size is None:\n from garage.tf.samplers import OffPolicyVectorizedSampler\n if isinstance(self.sampler, OffPolicyVectorizedSampler):\n batch_size = self.algo.max_path_length\n else:\n batch_size = 40 * self.algo.max_path_length\n\n self.n_epoch_cycles = n_epoch_cycles\n\n self.plot = plot\n self.start_worker()\n self.start_time = time.time()\n\n itr = 0\n last_return = None\n for epoch in range(n_epochs):\n self.itr_start_time = time.time()\n paths = None\n with logger.prefix('epoch #%d | ' % epoch):\n for cycle in range(n_epoch_cycles):\n paths = self.obtain_samples(itr, batch_size)\n paths = self.sampler.process_samples(itr, paths)\n last_return = self.algo.train_once(itr, paths)\n itr += 1\n self.save_snapshot(epoch, paths if store_paths else None)\n self.log_diagnostics(pause_for_plot)\n logger.dump_all(itr)\n tabular.clear()\n\n self.shutdown_worker()\n return last_return\n", "path": "garage/experiment/local_tf_runner.py"}]}
| 3,057 | 185 |
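The fix in the record above boils down to running `tf.report_uninitialized_variables()` once, decoding its byte strings into a set, and doing an exact membership test instead of a substring match against a repeatedly re-run op. A sketch of that helper, written against the TensorFlow 1.x session API the record uses (these ops are not available in TF 2.x eager mode):

```python
import tensorflow as tf


def initialize_uninitialized(sess):
    # Run the report op a single time and decode the returned byte strings,
    # so the name check is exact and cheap.
    uninited = {
        name.decode() for name in sess.run(tf.report_uninitialized_variables())
    }
    sess.run(tf.variables_initializer([
        v for v in tf.global_variables()
        if v.name.split(':')[0] in uninited
    ]))
```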
gh_patches_debug_13258
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-2955
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PCSX2 Runner: Add config path/file options (Feature request)
It would be nice to have a way to specify a config file as for example some games run better with a multi-threaded microVU than others. It would also enable to have different window sizes set for those seeking square pixels as some NTSC games run at 640x448 and others use 512x448. Same goes for PAL region games. :slightly_smiling_face:
The command line is: `PCSX2 --cfg=<str>`
I'm absolutely fine if you put it on low priority as probably only few people use PCSX2 anyways. :wink:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/runners/pcsx2.py`
Content:
```
1 # Standard Library
2 from gettext import gettext as _
3
4 # Lutris Modules
5 from lutris.runners.runner import Runner
6 from lutris.util import system
7
8
9 class pcsx2(Runner):
10 human_name = _("PCSX2")
11 description = _("PlayStation 2 emulator")
12 platforms = [_("Sony PlayStation 2")]
13 runnable_alone = True
14 runner_executable = "pcsx2/PCSX2"
15 game_options = [{
16 "option": "main_file",
17 "type": "file",
18 "label": _("ISO file"),
19 "default_path": "game_path",
20 }]
21
22 runner_options = [
23 {
24 "option": "fullscreen",
25 "type": "bool",
26 "label": _("Fullscreen"),
27 "default": False,
28 },
29 {
30 "option": "full_boot",
31 "type": "bool",
32 "label": _("Fullboot"),
33 "default": False
34 },
35 {
36 "option": "nogui",
37 "type": "bool",
38 "label": _("No GUI"),
39 "default": False
40 },
41 {
42 "option": "config_file",
43 "type": "file",
44 "label": _("Custom config file"),
45 "advanced": True,
46 },
47 {
48 "option": "config_path",
49 "type": "directory_chooser",
50 "label": _("Custom config path"),
51 "advanced": True,
52 },
53 ]
54
55 def play(self):
56 arguments = [self.get_executable()]
57
58 if self.runner_config.get("fullscreen"):
59 arguments.append("--fullscreen")
60 if self.runner_config.get("full_boot"):
61 arguments.append("--fullboot")
62 if self.runner_config.get("nogui"):
63 arguments.append("--nogui")
64 if self.runner_config.get("config_file"):
65 arguments.append("--cfg=%s", self.runner_config["config_file"])
66 if self.runner_config.get("config_path"):
67 arguments.append("--cfgpath=%s", self.runner_config["config_path"])
68
69 iso = self.game_config.get("main_file") or ""
70 if not system.path_exists(iso):
71 return {"error": "FILE_NOT_FOUND", "file": iso}
72 arguments.append(iso)
73 return {"command": arguments}
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/runners/pcsx2.py b/lutris/runners/pcsx2.py
--- a/lutris/runners/pcsx2.py
+++ b/lutris/runners/pcsx2.py
@@ -62,9 +62,9 @@
if self.runner_config.get("nogui"):
arguments.append("--nogui")
if self.runner_config.get("config_file"):
- arguments.append("--cfg=%s", self.runner_config["config_file"])
+ arguments.append("--cfg={}".format(self.runner_config["config_file"]))
if self.runner_config.get("config_path"):
- arguments.append("--cfgpath=%s", self.runner_config["config_path"])
+ arguments.append("--cfgpath={}".format(self.runner_config["config_path"]))
iso = self.game_config.get("main_file") or ""
if not system.path_exists(iso):
|
{"golden_diff": "diff --git a/lutris/runners/pcsx2.py b/lutris/runners/pcsx2.py\n--- a/lutris/runners/pcsx2.py\n+++ b/lutris/runners/pcsx2.py\n@@ -62,9 +62,9 @@\n if self.runner_config.get(\"nogui\"):\n arguments.append(\"--nogui\")\n if self.runner_config.get(\"config_file\"):\n- arguments.append(\"--cfg=%s\", self.runner_config[\"config_file\"])\n+ arguments.append(\"--cfg={}\".format(self.runner_config[\"config_file\"]))\n if self.runner_config.get(\"config_path\"):\n- arguments.append(\"--cfgpath=%s\", self.runner_config[\"config_path\"])\n+ arguments.append(\"--cfgpath={}\".format(self.runner_config[\"config_path\"]))\n \n iso = self.game_config.get(\"main_file\") or \"\"\n if not system.path_exists(iso):\n", "issue": "PCSX2 Runner: Add config path/file options (Feature request)\nIt would be nice to have a way to specify a config file as for example some games run better with a multi-threaded microVU than others. It would also enable to have different window sizes set for those seeking square pixels as some NTSC games run at 640x448 and others use 512x448. Same goes for PAL region games. :slightly_smiling_face: \r\n\r\nThe command line is: `PCSX2 --cfg=<str>`\r\n\r\nI'm absolutely fine if you put it on low priority as probably only few people use PCSX2 anyways. :wink: \n", "before_files": [{"content": "# Standard Library\nfrom gettext import gettext as _\n\n# Lutris Modules\nfrom lutris.runners.runner import Runner\nfrom lutris.util import system\n\n\nclass pcsx2(Runner):\n human_name = _(\"PCSX2\")\n description = _(\"PlayStation 2 emulator\")\n platforms = [_(\"Sony PlayStation 2\")]\n runnable_alone = True\n runner_executable = \"pcsx2/PCSX2\"\n game_options = [{\n \"option\": \"main_file\",\n \"type\": \"file\",\n \"label\": _(\"ISO file\"),\n \"default_path\": \"game_path\",\n }]\n\n runner_options = [\n {\n \"option\": \"fullscreen\",\n \"type\": \"bool\",\n \"label\": _(\"Fullscreen\"),\n \"default\": False,\n },\n {\n \"option\": \"full_boot\",\n \"type\": \"bool\",\n \"label\": _(\"Fullboot\"),\n \"default\": False\n },\n {\n \"option\": \"nogui\",\n \"type\": \"bool\",\n \"label\": _(\"No GUI\"),\n \"default\": False\n },\n {\n \"option\": \"config_file\",\n \"type\": \"file\",\n \"label\": _(\"Custom config file\"),\n \"advanced\": True,\n },\n {\n \"option\": \"config_path\",\n \"type\": \"directory_chooser\",\n \"label\": _(\"Custom config path\"),\n \"advanced\": True,\n },\n ]\n\n def play(self):\n arguments = [self.get_executable()]\n\n if self.runner_config.get(\"fullscreen\"):\n arguments.append(\"--fullscreen\")\n if self.runner_config.get(\"full_boot\"):\n arguments.append(\"--fullboot\")\n if self.runner_config.get(\"nogui\"):\n arguments.append(\"--nogui\")\n if self.runner_config.get(\"config_file\"):\n arguments.append(\"--cfg=%s\", self.runner_config[\"config_file\"])\n if self.runner_config.get(\"config_path\"):\n arguments.append(\"--cfgpath=%s\", self.runner_config[\"config_path\"])\n\n iso = self.game_config.get(\"main_file\") or \"\"\n if not system.path_exists(iso):\n return {\"error\": \"FILE_NOT_FOUND\", \"file\": iso}\n arguments.append(iso)\n return {\"command\": arguments}\n", "path": "lutris/runners/pcsx2.py"}], "after_files": [{"content": "# Standard Library\nfrom gettext import gettext as _\n\n# Lutris Modules\nfrom lutris.runners.runner import Runner\nfrom lutris.util import system\n\n\nclass pcsx2(Runner):\n human_name = _(\"PCSX2\")\n description = _(\"PlayStation 2 emulator\")\n platforms = [_(\"Sony PlayStation 2\")]\n runnable_alone = 
True\n runner_executable = \"pcsx2/PCSX2\"\n game_options = [{\n \"option\": \"main_file\",\n \"type\": \"file\",\n \"label\": _(\"ISO file\"),\n \"default_path\": \"game_path\",\n }]\n\n runner_options = [\n {\n \"option\": \"fullscreen\",\n \"type\": \"bool\",\n \"label\": _(\"Fullscreen\"),\n \"default\": False,\n },\n {\n \"option\": \"full_boot\",\n \"type\": \"bool\",\n \"label\": _(\"Fullboot\"),\n \"default\": False\n },\n {\n \"option\": \"nogui\",\n \"type\": \"bool\",\n \"label\": _(\"No GUI\"),\n \"default\": False\n },\n {\n \"option\": \"config_file\",\n \"type\": \"file\",\n \"label\": _(\"Custom config file\"),\n \"advanced\": True,\n },\n {\n \"option\": \"config_path\",\n \"type\": \"directory_chooser\",\n \"label\": _(\"Custom config path\"),\n \"advanced\": True,\n },\n ]\n\n def play(self):\n arguments = [self.get_executable()]\n\n if self.runner_config.get(\"fullscreen\"):\n arguments.append(\"--fullscreen\")\n if self.runner_config.get(\"full_boot\"):\n arguments.append(\"--fullboot\")\n if self.runner_config.get(\"nogui\"):\n arguments.append(\"--nogui\")\n if self.runner_config.get(\"config_file\"):\n arguments.append(\"--cfg={}\".format(self.runner_config[\"config_file\"]))\n if self.runner_config.get(\"config_path\"):\n arguments.append(\"--cfgpath={}\".format(self.runner_config[\"config_path\"]))\n\n iso = self.game_config.get(\"main_file\") or \"\"\n if not system.path_exists(iso):\n return {\"error\": \"FILE_NOT_FOUND\", \"file\": iso}\n arguments.append(iso)\n return {\"command\": arguments}\n", "path": "lutris/runners/pcsx2.py"}]}
| 1,018 | 190 |
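The one-line bug in the record above is that `list.append()` only accepts a single argument, so the `"%s"`-style two-argument call never builds the flag and instead raises a `TypeError` at runtime. A tiny sketch of the corrected argument building; the config path shown is hypothetical:

```python
config_file = "/home/user/.config/pcsx2/custom.ini"  # hypothetical path

arguments = ["PCSX2"]
# arguments.append("--cfg=%s", config_file)        # TypeError: append() takes exactly one argument
arguments.append("--cfg={}".format(config_file))    # builds the intended flag
print(arguments)  # ['PCSX2', '--cfg=/home/user/.config/pcsx2/custom.ini']
```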
gh_patches_debug_2941
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-10078
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kolibri 0.16 - Resources of type HTML5 and exercises are not displayed
## Observed behavior
This is a follow up to https://github.com/learningequality/kolibri/pull/9724#issuecomment-1408889097
In the latest develop build both exercises and html resources are not being displayed when a user is navigating through the Library.
## Expected behavior
It should be possible to preview the resource.
## Steps to reproduce the issue
1. Install the the following [0. 16 build ](https://buildkite.com/learningequality/kolibri-debian/builds/5813#018603a8-a7d9-4c79-98d0-e2a0db6a7c69) and import the QA channel.
2. Go to Library > QA Channel
3. Click on any resource withing the HTML5 folder or the Exercises folder
## Videos
HTML5:
https://user-images.githubusercontent.com/79847249/215529161-a0e88738-b221-416a-beea-cf0c6192450f.mp4
EXERCISES:
https://user-images.githubusercontent.com/79847249/215529190-28ecdf59-db72-4b3a-a6df-2c72ab2f395c.mp4
## Console error
```
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.learn.app registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.media_player.main registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.pdf_viewer.main registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.epub_viewer.main registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.html5_viewer.main registered
vue.runtime.esm.js:5753 GET http://127.0.0.1:51957/content/static/hashi/hashi-0efeb19f7e4ded20c73f.html 404 (Not Found)
insertBefore @ vue.runtime.esm.js:5753
insert @ vue.runtime.esm.js:6083
(anonymous) @ vue.runtime.esm.js:6030
createElm @ vue.runtime.esm.js:5969
(anonymous) @ vue.runtime.esm.js:6560
Vue._update @ vue.runtime.esm.js:3963
updateComponent @ vue.runtime.esm.js:4081
Watcher.get @ vue.runtime.esm.js:4495
Watcher.run @ vue.runtime.esm.js:4570
flushSchedulerQueue @ vue.runtime.esm.js:4326
(anonymous) @ vue.runtime.esm.js:1989
flushCallbacks @ vue.runtime.esm.js:1915
Promise.then (async)
timerFunc @ vue.runtime.esm.js:1942
nextTick @ vue.runtime.esm.js:1999
(anonymous) @ vue.runtime.esm.js:4418
Watcher.update @ vue.runtime.esm.js:4560
Vue.$forceUpdate @ vue.runtime.esm.js:3984
forceRender @ vue.runtime.esm.js:3668
(anonymous) @ vue.runtime.esm.js:3690
(anonymous) @ vue.runtime.esm.js:336
vue.runtime.esm.js:5753 GET http://127.0.0.1:51957/content/static/hashi/hashi-0efeb19f7e4ded20c73f.html 404 (Not Found)
insertBefore @ vue.runtime.esm.js:5753
insert @ vue.runtime.esm.js:6083
(anonymous) @ vue.runtime.esm.js:6030
createElm @ vue.runtime.esm.js:5969
(anonymous) @ vue.runtime.esm.js:6260
patchVnode @ vue.runtime.esm.js:6363
(anonymous) @ vue.runtime.esm.js:6526
Vue._update @ vue.runtime.esm.js:3963
updateComponent @ vue.runtime.esm.js:4081
Watcher.get @ vue.runtime.esm.js:4495
Watcher.run @ vue.runtime.esm.js:4570
flushSchedulerQueue @ vue.runtime.esm.js:4326
(anonymous) @ vue.runtime.esm.js:1989
flushCallbacks @ vue.runtime.esm.js:1915
Promise.then (async)
timerFunc @ vue.runtime.esm.js:1942
nextTick @ vue.runtime.esm.js:1999
(anonymous) @ vue.runtime.esm.js:4418
Watcher.update @ vue.runtime.esm.js:4560
Dep.notify @ vue.runtime.esm.js:730
set @ vue.runtime.esm.js:1055
sharedPropertyDefinition.set @ vue.runtime.esm.js:4644
(anonymous) @ ContentPage.vue:312
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.perseus_viewer.main registered
```
## Usage Details
Windows 10, Ubuntu - Chrome, Firefox
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/deployment/default/alt_wsgi.py`
Content:
```
1 """
2 WSGI config for the alternate origin server used for serving
3 sandboxed content
4 """
5 import os
6
7 import kolibri.core.content
8 from kolibri.core.content.utils import paths
9 from kolibri.core.content.zip_wsgi import get_application
10 from kolibri.utils.kolibri_whitenoise import DynamicWhiteNoise
11
12 os.environ.setdefault(
13 "DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base"
14 )
15
16
17 def generate_alt_wsgi_application():
18 alt_content_path = "/" + paths.get_content_url(
19 paths.zip_content_path_prefix()
20 ).lstrip("/")
21
22 content_dirs = [paths.get_content_dir_path()] + paths.get_content_fallback_paths()
23
24 content_static_path = os.path.join(
25 os.path.dirname(kolibri.core.content.__file__), "static"
26 )
27
28 # Mount static files
29 return DynamicWhiteNoise(
30 get_application(),
31 dynamic_locations=[
32 (alt_content_path, content_dir) for content_dir in content_dirs
33 ]
34 + [(paths.zip_content_static_root(), content_static_path)],
35 app_paths=paths.get_zip_content_base_path(),
36 )
37
38
39 alt_application = generate_alt_wsgi_application()
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/deployment/default/alt_wsgi.py b/kolibri/deployment/default/alt_wsgi.py
--- a/kolibri/deployment/default/alt_wsgi.py
+++ b/kolibri/deployment/default/alt_wsgi.py
@@ -32,7 +32,7 @@
(alt_content_path, content_dir) for content_dir in content_dirs
]
+ [(paths.zip_content_static_root(), content_static_path)],
- app_paths=paths.get_zip_content_base_path(),
+ app_paths=[paths.get_zip_content_base_path()],
)
|
{"golden_diff": "diff --git a/kolibri/deployment/default/alt_wsgi.py b/kolibri/deployment/default/alt_wsgi.py\n--- a/kolibri/deployment/default/alt_wsgi.py\n+++ b/kolibri/deployment/default/alt_wsgi.py\n@@ -32,7 +32,7 @@\n (alt_content_path, content_dir) for content_dir in content_dirs\n ]\n + [(paths.zip_content_static_root(), content_static_path)],\n- app_paths=paths.get_zip_content_base_path(),\n+ app_paths=[paths.get_zip_content_base_path()],\n )\n", "issue": "Kolibri 0.16 - Resources of type HTML5 and exercises are not displayed\n## Observed behavior\r\nThis is a follow up to https://github.com/learningequality/kolibri/pull/9724#issuecomment-1408889097\r\n\r\nIn the latest develop build both exercises and html resources are not being displayed when a user is navigating through the Library.\r\n\r\n## Expected behavior\r\nIt should be possible to preview the resource.\r\n\r\n## Steps to reproduce the issue\r\n1. Install the the following [0. 16 build ](https://buildkite.com/learningequality/kolibri-debian/builds/5813#018603a8-a7d9-4c79-98d0-e2a0db6a7c69) and import the QA channel.\r\n2. Go to Library > QA Channel\r\n3. Click on any resource withing the HTML5 folder or the Exercises folder\r\n\r\n## Videos\r\n\r\nHTML5:\r\n\r\nhttps://user-images.githubusercontent.com/79847249/215529161-a0e88738-b221-416a-beea-cf0c6192450f.mp4\r\n\r\nEXERCISES:\r\n\r\nhttps://user-images.githubusercontent.com/79847249/215529190-28ecdf59-db72-4b3a-a6df-2c72ab2f395c.mp4\r\n\r\n## Console error\r\n\r\n```\r\npluginMediator.js:122 Kolibri Modules: kolibri.plugins.learn.app registered\r\npluginMediator.js:122 Kolibri Modules: kolibri.plugins.media_player.main registered\r\npluginMediator.js:122 Kolibri Modules: kolibri.plugins.pdf_viewer.main registered\r\npluginMediator.js:122 Kolibri Modules: kolibri.plugins.epub_viewer.main registered\r\npluginMediator.js:122 Kolibri Modules: kolibri.plugins.html5_viewer.main registered\r\nvue.runtime.esm.js:5753 GET http://127.0.0.1:51957/content/static/hashi/hashi-0efeb19f7e4ded20c73f.html 404 (Not Found)\r\ninsertBefore @ vue.runtime.esm.js:5753\r\ninsert @ vue.runtime.esm.js:6083\r\n(anonymous) @ vue.runtime.esm.js:6030\r\ncreateElm @ vue.runtime.esm.js:5969\r\n(anonymous) @ vue.runtime.esm.js:6560\r\nVue._update @ vue.runtime.esm.js:3963\r\nupdateComponent @ vue.runtime.esm.js:4081\r\nWatcher.get @ vue.runtime.esm.js:4495\r\nWatcher.run @ vue.runtime.esm.js:4570\r\nflushSchedulerQueue @ vue.runtime.esm.js:4326\r\n(anonymous) @ vue.runtime.esm.js:1989\r\nflushCallbacks @ vue.runtime.esm.js:1915\r\nPromise.then (async)\r\ntimerFunc @ vue.runtime.esm.js:1942\r\nnextTick @ vue.runtime.esm.js:1999\r\n(anonymous) @ vue.runtime.esm.js:4418\r\nWatcher.update @ vue.runtime.esm.js:4560\r\nVue.$forceUpdate @ vue.runtime.esm.js:3984\r\nforceRender @ vue.runtime.esm.js:3668\r\n(anonymous) @ vue.runtime.esm.js:3690\r\n(anonymous) @ vue.runtime.esm.js:336\r\nvue.runtime.esm.js:5753 GET http://127.0.0.1:51957/content/static/hashi/hashi-0efeb19f7e4ded20c73f.html 404 (Not Found)\r\ninsertBefore @ vue.runtime.esm.js:5753\r\ninsert @ vue.runtime.esm.js:6083\r\n(anonymous) @ vue.runtime.esm.js:6030\r\ncreateElm @ vue.runtime.esm.js:5969\r\n(anonymous) @ vue.runtime.esm.js:6260\r\npatchVnode @ vue.runtime.esm.js:6363\r\n(anonymous) @ vue.runtime.esm.js:6526\r\nVue._update @ vue.runtime.esm.js:3963\r\nupdateComponent @ vue.runtime.esm.js:4081\r\nWatcher.get @ vue.runtime.esm.js:4495\r\nWatcher.run @ vue.runtime.esm.js:4570\r\nflushSchedulerQueue @ vue.runtime.esm.js:4326\r\n(anonymous) @ 
vue.runtime.esm.js:1989\r\nflushCallbacks @ vue.runtime.esm.js:1915\r\nPromise.then (async)\r\ntimerFunc @ vue.runtime.esm.js:1942\r\nnextTick @ vue.runtime.esm.js:1999\r\n(anonymous) @ vue.runtime.esm.js:4418\r\nWatcher.update @ vue.runtime.esm.js:4560\r\nDep.notify @ vue.runtime.esm.js:730\r\nset @ vue.runtime.esm.js:1055\r\nsharedPropertyDefinition.set @ vue.runtime.esm.js:4644\r\n(anonymous) @ ContentPage.vue:312\r\npluginMediator.js:122 Kolibri Modules: kolibri.plugins.perseus_viewer.main registered\r\n```\r\n\r\n## Usage Details\r\nWindows 10, Ubuntu - Chrome, Firefox\n", "before_files": [{"content": "\"\"\"\nWSGI config for the alternate origin server used for serving\nsandboxed content\n\"\"\"\nimport os\n\nimport kolibri.core.content\nfrom kolibri.core.content.utils import paths\nfrom kolibri.core.content.zip_wsgi import get_application\nfrom kolibri.utils.kolibri_whitenoise import DynamicWhiteNoise\n\nos.environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\", \"kolibri.deployment.default.settings.base\"\n)\n\n\ndef generate_alt_wsgi_application():\n alt_content_path = \"/\" + paths.get_content_url(\n paths.zip_content_path_prefix()\n ).lstrip(\"/\")\n\n content_dirs = [paths.get_content_dir_path()] + paths.get_content_fallback_paths()\n\n content_static_path = os.path.join(\n os.path.dirname(kolibri.core.content.__file__), \"static\"\n )\n\n # Mount static files\n return DynamicWhiteNoise(\n get_application(),\n dynamic_locations=[\n (alt_content_path, content_dir) for content_dir in content_dirs\n ]\n + [(paths.zip_content_static_root(), content_static_path)],\n app_paths=paths.get_zip_content_base_path(),\n )\n\n\nalt_application = generate_alt_wsgi_application()\n", "path": "kolibri/deployment/default/alt_wsgi.py"}], "after_files": [{"content": "\"\"\"\nWSGI config for the alternate origin server used for serving\nsandboxed content\n\"\"\"\nimport os\n\nimport kolibri.core.content\nfrom kolibri.core.content.utils import paths\nfrom kolibri.core.content.zip_wsgi import get_application\nfrom kolibri.utils.kolibri_whitenoise import DynamicWhiteNoise\n\nos.environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\", \"kolibri.deployment.default.settings.base\"\n)\n\n\ndef generate_alt_wsgi_application():\n alt_content_path = \"/\" + paths.get_content_url(\n paths.zip_content_path_prefix()\n ).lstrip(\"/\")\n\n content_dirs = [paths.get_content_dir_path()] + paths.get_content_fallback_paths()\n\n content_static_path = os.path.join(\n os.path.dirname(kolibri.core.content.__file__), \"static\"\n )\n\n # Mount static files\n return DynamicWhiteNoise(\n get_application(),\n dynamic_locations=[\n (alt_content_path, content_dir) for content_dir in content_dirs\n ]\n + [(paths.zip_content_static_root(), content_static_path)],\n app_paths=[paths.get_zip_content_base_path()],\n )\n\n\nalt_application = generate_alt_wsgi_application()\n", "path": "kolibri/deployment/default/alt_wsgi.py"}]}
| 1,797 | 125 |
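The golden diff above only wraps the base path in a list (`app_paths=[...]`). The internals of Kolibri's `DynamicWhiteNoise` are not shown in the record, but the change guards against a classic Python pitfall: a bare string is itself iterable, so code that loops over or matches against `app_paths` would see single characters rather than the one intended prefix. A generic sketch of that pitfall, with a made-up prefix:

```python
base_path = "/zipcontent/"  # hypothetical prefix

chars = [prefix for prefix in base_path]    # ['/', 'z', 'i', 'p', ...] - characters, not paths
paths = [prefix for prefix in [base_path]]  # ['/zipcontent/']        - the intended prefix

assert paths == ["/zipcontent/"]
assert chars != paths
```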
gh_patches_debug_28937
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-9085
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOCS] Update links to bokehplots.com, gitter and mailing-list
Dev documentation still points to https://bokehplots.com/pages/contact.html. bokehplots.com has out-dated information and is being deprecated. On the gitter @bryevdv said he want's those links to point to bokeh.org instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3 from os.path import abspath, dirname, join
4
5 #
6 # Bokeh documentation build configuration file, created by
7 # sphinx-quickstart on Sat Oct 12 23:43:03 2013.
8 #
9 # This file is execfile()d with the current directory set to its containing dir.
10 #
11 # Note that not all possible configuration values are present in this
12 # autogenerated file.
13 #
14 # All configuration values have a default; values that are commented out
15 # serve to show the default.
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 #sys.path.insert(0, os.path.abspath('.'))
21
22 # -- General configuration -----------------------------------------------------
23
24 # If your documentation needs a minimal Sphinx version, state it here.
25 needs_sphinx = '1.8'
26
27 # Add any Sphinx extension module names here, as strings. They can be extensions
28 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
29 extensions = [
30 'sphinx.ext.autodoc',
31 'sphinx.ext.autosummary',
32 'sphinx.ext.ifconfig',
33 'sphinx.ext.napoleon',
34 'sphinx.ext.intersphinx',
35 'sphinx.ext.viewcode',
36 'bokeh.sphinxext.bokeh_autodoc',
37 'bokeh.sphinxext.bokeh_color',
38 'bokeh.sphinxext.bokeh_enum',
39 'bokeh.sphinxext.bokeh_gallery',
40 'bokeh.sphinxext.bokeh_github',
41 'bokeh.sphinxext.bokeh_jinja',
42 'bokeh.sphinxext.bokeh_model',
43 'bokeh.sphinxext.bokeh_options',
44 'bokeh.sphinxext.bokeh_palette',
45 'bokeh.sphinxext.bokeh_palette_group',
46 'bokeh.sphinxext.bokeh_plot',
47 'bokeh.sphinxext.bokeh_prop',
48 'bokeh.sphinxext.bokeh_releases',
49 'bokeh.sphinxext.bokeh_sitemap',
50 'bokeh.sphinxext.bokehjs_content',
51 'bokeh.sphinxext.collapsible_code_block',
52 ]
53
54 napoleon_include_init_with_doc = True
55
56 # Add any paths that contain templates here, relative to this directory.
57 templates_path = ['_templates']
58
59 # The suffix of source filenames.
60 source_suffix = '.rst'
61
62 # The encoding of source files.
63 #source_encoding = 'utf-8-sig'
64
65 # The master toctree document.
66 master_doc = 'index'
67
68 # General information about the project.
69 project = 'Bokeh'
70 copyright = '© Copyright 2015-2018, Anaconda and Bokeh Contributors.'
71
72 # Get the standard computed Bokeh version string to use for |version|
73 # and |release|
74 from bokeh import __version__
75
76 # The short X.Y version.
77 version = __version__
78 # The full version, including alpha/beta/rc tags.
79 release = __version__
80
81 # Check for version override (e.g. when re-deploying a previously released
82 # docs, or when pushing test docs that do not have a corresponding BokehJS
83 # available on CDN)
84 from bokeh.settings import settings
85 if settings.docs_version():
86 version = release = settings.docs_version()
87
88 # get all the versions that will appear in the version dropdown
89 f = open(join(dirname(abspath(__file__)), "all_versions.txt"))
90 all_versions = [x.strip() for x in reversed(f.readlines())]
91
92 # The language for content autogenerated by Sphinx. Refer to documentation
93 # for a list of supported languages.
94 #language = None
95
96 # There are two options for replacing |today|: either, you set today to some
97 # non-false value, then it is used:
98 #today = ''
99 # Else, today_fmt is used as the format for a strftime call.
100 #today_fmt = '%B %d, %Y'
101
102 # List of patterns, relative to source directory, that match files and
103 # directories to ignore when looking for source files.
104 #
105 # NOTE: in these docs all .py script are assumed to be bokeh plot scripts!
106 # with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files
107 bokeh_plot_pyfile_include_dirs = ['docs']
108
109 # Whether to allow builds to succeed if a Google API key is not defined and plots
110 # containing "GOOGLE_API_KEY" are processed
111 bokeh_missing_google_api_key_ok = False
112
113 # The reST default role (used for this markup: `text`) to use for all documents.
114 #default_role = None
115
116 # If true, '()' will be appended to :func: etc. cross-reference text.
117 #add_function_parentheses = True
118
119 # If true, the current module name will be prepended to all description
120 # unit titles (such as .. function::).
121 add_module_names = False
122
123 # If true, sectionauthor and moduleauthor directives will be shown in the
124 # output. They are ignored by default.
125 #show_authors = False
126
127 # The name of the Pygments (syntax highlighting) style to use.
128 pygments_style = 'sphinx'
129
130 # A list of ignored prefixes for module index sorting.
131 #modindex_common_prefix = []
132
133 # Sort members by type
134 autodoc_member_order = 'groupwise'
135
136 # patterns to exclude
137 exclude_patterns = ['docs/releases/*']
138
139 # This would more properly be done with rst_epilog but something about
140 # the combination of this with the bokeh-gallery directive breaks the build
141 rst_prolog = """
142 .. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
143 .. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
144 .. |Document| replace:: :py:class:`~bokeh.document.Document`
145 .. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
146 .. |Model| replace:: :py:class:`~bokeh.model.Model`
147 .. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
148 .. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
149 .. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
150 .. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
151
152 .. |field| replace:: :py:func:`~bokeh.core.properties.field`
153 .. |value| replace:: :py:func:`~bokeh.core.properties.value`
154 """
155
156 # -- Options for HTML output ---------------------------------------------------
157
158 # The theme to use for HTML and HTML Help pages. See the documentation for
159 # a list of builtin themes.
160 html_theme = 'bokeh_theme'
161 html_theme_path = ['.']
162
163 html_context = {
164 'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed
165 'DESCRIPTION': 'Bokeh visualization library, documentation site.',
166 'AUTHOR': 'Bokeh contributors',
167 'VERSION': version,
168 'NAV': (
169 ('Github', '//github.com/bokeh/bokeh'),
170 ),
171 'ABOUT': (
172 ('Vision and Work', 'vision'),
173 ('Team', 'team'),
174 ('Citation', 'citation'),
175 ('Contact', 'contact'),
176 ),
177 'SOCIAL': (
178 ('Contribute', 'contribute'),
179 ('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),
180 ('Github', '//github.com/bokeh/bokeh'),
181 ('Twitter', '//twitter.com/BokehPlots'),
182 ),
183 'NAV_DOCS': (
184 ('Installation', 'installation'),
185 ('User Guide', 'user_guide'),
186 ('Gallery', 'gallery'),
187 ('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),
188 ('Reference', 'reference'),
189 ('Releases', 'releases'),
190 ('Developer Guide', 'dev_guide'),
191 ),
192 'ALL_VERSIONS': all_versions,
193 }
194
195 # If true, links to the reST sources are added to the pages.
196 html_show_sourcelink = True
197
198 # Output file base name for HTML help builder.
199 htmlhelp_basename = 'Bokehdoc'
200
201 # -- Options for LaTeX output --------------------------------------------------
202
203 latex_elements = {
204 # The paper size ('letterpaper' or 'a4paper').
205 #'papersize': 'letterpaper',
206
207 # The font size ('10pt', '11pt' or '12pt').
208 #'pointsize': '10pt',
209
210 # Additional stuff for the LaTeX preamble.
211 #'preamble': '',
212 }
213
214 # Grouping the document tree into LaTeX files. List of tuples
215 # (source start file, target name, title, author, documentclass [howto/manual]).
216 latex_documents = [
217 ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),
218 ]
219
220 # The name of an image file (relative to this directory) to place at the top of
221 # the title page.
222 #latex_logo = None
223
224 # For "manual" documents, if this is true, then toplevel headings are parts,
225 # not chapters.
226 #latex_use_parts = False
227
228 # If true, show page references after internal links.
229 #latex_show_pagerefs = False
230
231 # If true, show URL addresses after external links.
232 #latex_show_urls = False
233
234 # Documents to append as an appendix to all manuals.
235 #latex_appendices = []
236
237 # If false, no module index is generated.
238 #latex_domain_indices = True
239
240
241 # -- Options for manual page output --------------------------------------------
242
243 # One entry per manual page. List of tuples
244 # (source start file, name, description, authors, manual section).
245 man_pages = [
246 ('index', 'bokeh', u'Bokeh Documentation',
247 [u'Anaconda'], 1)
248 ]
249
250 # If true, show URL addresses after external links.
251 #man_show_urls = False
252
253
254 # -- Options for Texinfo output ------------------------------------------------
255
256 # Grouping the document tree into Texinfo files. List of tuples
257 # (source start file, target name, title, author,
258 # dir menu entry, description, category)
259 texinfo_documents = [
260 ('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
261 ]
262
263 # Documents to append as an appendix to all manuals.
264 #texinfo_appendices = []
265
266 # If false, no module index is generated.
267 #texinfo_domain_indices = True
268
269 # How to display URL addresses: 'footnote', 'no', or 'inline'.
270 #texinfo_show_urls = 'footnote'
271
272 # intersphinx settings
273 intersphinx_mapping = {
274 'python': ('https://docs.python.org/3/', None),
275 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
276 'numpy': ('https://docs.scipy.org/doc/numpy/', None)
277 }
278
```
Path: `scripts/deps.py`
Content:
```
1 import sys
2 import platform
3 import jinja2
4 import yaml
5
6
7 def load_setup_py_data():
8 import os
9 import setuptools
10 os.environ['CONDA_BUILD_STATE'] = 'RENDER'
11 data = {}
12
13 def _setup(**kw): data.update(kw)
14 setuptools.setup = _setup
15 return data
16
17 meta_src = jinja2.Template(open("conda.recipe/meta.yaml").read())
18 meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),
19 Loader=yaml.FullLoader)
20
21 section = {
22 "build" : meta_src["requirements"]["build"],
23 "deploy" : meta_src["extra"]["deploy"],
24 "run" : meta_src["requirements"]["run"],
25 "test" : meta_src["test"]["requires"],
26 }
27
28 spec = []
29 for name in sys.argv[1:]:
30 spec += section[name]
31
32 # bare python unpins python version causing upgrade to latest
33 if 'python' in spec: spec.remove('python')
34
35 # add double quotes to specs for windows, fixes #9065
36 if "windows" in platform.platform().lower():
37 spec = ['"{}"'.format(s) for s in spec]
38
39 deps = ""
40 deps += " ".join(s for s in spec)
41 deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec
42 deps = deps.replace(' <', '<')
43 deps = deps.replace(' [unix]', ' ')
44
45 print(deps)
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/deps.py b/scripts/deps.py
--- a/scripts/deps.py
+++ b/scripts/deps.py
@@ -15,8 +15,18 @@
return data
meta_src = jinja2.Template(open("conda.recipe/meta.yaml").read())
-meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),
- Loader=yaml.FullLoader)
+try:
+ meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),
+ Loader=yaml.FullLoader)
+except AttributeError as e:
+ # Loader=yaml.FullLoader added in pyyaml 5.1 because of:
+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
+ # isn't available on conda for python=3.5
+ # fall back to calling without loader if it isn't available
+ if 'FullLoader' in repr(e):
+ meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data))
+ else:
+ raise
section = {
"build" : meta_src["requirements"]["build"],
diff --git a/sphinx/source/conf.py b/sphinx/source/conf.py
--- a/sphinx/source/conf.py
+++ b/sphinx/source/conf.py
@@ -169,14 +169,14 @@
('Github', '//github.com/bokeh/bokeh'),
),
'ABOUT': (
- ('Vision and Work', 'vision'),
- ('Team', 'team'),
- ('Citation', 'citation'),
- ('Contact', 'contact'),
+ ('Roadmap', 'https://bokeh.org/roadmap'),
+ ('Team', 'https://bokeh.org/team'),
+ ('Citation', 'https://bokeh.org/citation'),
+ ('Contact', 'https://bokeh.org'),
),
'SOCIAL': (
('Contribute', 'contribute'),
- ('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),
+ ('Discourse', 'https://discourse.bokeh.org'),
('Github', '//github.com/bokeh/bokeh'),
('Twitter', '//twitter.com/BokehPlots'),
),
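
For reference, here is a minimal standalone sketch of the PyYAML compatibility pattern used in the `scripts/deps.py` hunk above; the helper name and the sample YAML string are illustrative and not from the repository (assumes PyYAML is installed):

```python
# Sketch of the FullLoader fallback: yaml.FullLoader exists only in PyYAML >= 5.1,
# so older versions fall back to the legacy yaml.load(text) call.
import yaml

def safe_yaml_load(text: str):
    """Load YAML with FullLoader when available, falling back for PyYAML < 5.1."""
    try:
        # Accessing yaml.FullLoader raises AttributeError on old PyYAML,
        # which is exactly what the patch above catches.
        return yaml.load(text, Loader=yaml.FullLoader)
    except AttributeError as e:
        if 'FullLoader' in repr(e):
            return yaml.load(text)
        raise

if __name__ == "__main__":
    # Illustrative input only, not the real conda recipe.
    print(safe_yaml_load("requirements:\n  run:\n    - python\n"))
```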
|
{"golden_diff": "diff --git a/scripts/deps.py b/scripts/deps.py\n--- a/scripts/deps.py\n+++ b/scripts/deps.py\n@@ -15,8 +15,18 @@\n return data\n \n meta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\n-meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n- Loader=yaml.FullLoader)\n+try:\n+ meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n+ Loader=yaml.FullLoader)\n+except AttributeError as e:\n+ # Loader=yaml.FullLoader added in pyyaml 5.1 because of:\n+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n+ # isn't available on conda for python=3.5\n+ # fall back to calling without loader if it isn't available\n+ if 'FullLoader' in repr(e):\n+ meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data))\n+ else:\n+ raise\n \n section = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\ndiff --git a/sphinx/source/conf.py b/sphinx/source/conf.py\n--- a/sphinx/source/conf.py\n+++ b/sphinx/source/conf.py\n@@ -169,14 +169,14 @@\n ('Github', '//github.com/bokeh/bokeh'),\n ),\n 'ABOUT': (\n- ('Vision and Work', 'vision'),\n- ('Team', 'team'),\n- ('Citation', 'citation'),\n- ('Contact', 'contact'),\n+ ('Roadmap', 'https://bokeh.org/roadmap'),\n+ ('Team', 'https://bokeh.org/team'),\n+ ('Citation', 'https://bokeh.org/citation'),\n+ ('Contact', 'https://bokeh.org'),\n ),\n 'SOCIAL': (\n ('Contribute', 'contribute'),\n- ('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),\n+ ('Discourse', 'https://discourse.bokeh.org'),\n ('Github', '//github.com/bokeh/bokeh'),\n ('Twitter', '//twitter.com/BokehPlots'),\n ),\n", "issue": "[DOCS] Update links to bokehplots.com, gitter and mailing-list\nDev documentation still points to https://bokehplots.com/pages/contact.html. bokehplots.com has out-dated information and is being deprecated. On the gitter @bryevdv said he want's those links to point to bokeh.org instead.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom os.path import abspath, dirname, join\n\n#\n# Bokeh documentation build configuration file, created by\n# sphinx-quickstart on Sat Oct 12 23:43:03 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.8'\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'bokeh.sphinxext.bokeh_autodoc',\n 'bokeh.sphinxext.bokeh_color',\n 'bokeh.sphinxext.bokeh_enum',\n 'bokeh.sphinxext.bokeh_gallery',\n 'bokeh.sphinxext.bokeh_github',\n 'bokeh.sphinxext.bokeh_jinja',\n 'bokeh.sphinxext.bokeh_model',\n 'bokeh.sphinxext.bokeh_options',\n 'bokeh.sphinxext.bokeh_palette',\n 'bokeh.sphinxext.bokeh_palette_group',\n 'bokeh.sphinxext.bokeh_plot',\n 'bokeh.sphinxext.bokeh_prop',\n 'bokeh.sphinxext.bokeh_releases',\n 'bokeh.sphinxext.bokeh_sitemap',\n 'bokeh.sphinxext.bokehjs_content',\n 'bokeh.sphinxext.collapsible_code_block',\n]\n\nnapoleon_include_init_with_doc = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Bokeh'\ncopyright = '\u00a9 Copyright 2015-2018, Anaconda and Bokeh Contributors.'\n\n# Get the standard computed Bokeh version string to use for |version|\n# and |release|\nfrom bokeh import __version__\n\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# Check for version override (e.g. when re-deploying a previously released\n# docs, or when pushing test docs that do not have a corresponding BokehJS\n# available on CDN)\nfrom bokeh.settings import settings\nif settings.docs_version():\n version = release = settings.docs_version()\n\n# get all the versions that will appear in the version dropdown\nf = open(join(dirname(abspath(__file__)), \"all_versions.txt\"))\nall_versions = [x.strip() for x in reversed(f.readlines())]\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n#\n# NOTE: in these docs all .py script are assumed to be bokeh plot scripts!\n# with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files\nbokeh_plot_pyfile_include_dirs = ['docs']\n\n# Whether to allow builds to succeed if a Google API key is not defined and plots\n# containing \"GOOGLE_API_KEY\" are processed\nbokeh_missing_google_api_key_ok = False\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# Sort members by type\nautodoc_member_order = 'groupwise'\n\n# patterns to exclude\nexclude_patterns = ['docs/releases/*']\n\n# This would more properly be done with rst_epilog but something about\n# the combination of this with the bokeh-gallery directive breaks the build\nrst_prolog = \"\"\"\n.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`\n.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`\n.. |Document| replace:: :py:class:`~bokeh.document.Document`\n.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`\n.. |Model| replace:: :py:class:`~bokeh.model.Model`\n.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`\n.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`\n.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`\n.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`\n\n.. |field| replace:: :py:func:`~bokeh.core.properties.field`\n.. |value| replace:: :py:func:`~bokeh.core.properties.value`\n\"\"\"\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'bokeh_theme'\nhtml_theme_path = ['.']\n\nhtml_context = {\n 'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed\n 'DESCRIPTION': 'Bokeh visualization library, documentation site.',\n 'AUTHOR': 'Bokeh contributors',\n 'VERSION': version,\n 'NAV': (\n ('Github', '//github.com/bokeh/bokeh'),\n ),\n 'ABOUT': (\n ('Vision and Work', 'vision'),\n ('Team', 'team'),\n ('Citation', 'citation'),\n ('Contact', 'contact'),\n ),\n 'SOCIAL': (\n ('Contribute', 'contribute'),\n ('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),\n ('Github', '//github.com/bokeh/bokeh'),\n ('Twitter', '//twitter.com/BokehPlots'),\n ),\n 'NAV_DOCS': (\n ('Installation', 'installation'),\n ('User Guide', 'user_guide'),\n ('Gallery', 'gallery'),\n ('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),\n ('Reference', 'reference'),\n ('Releases', 'releases'),\n ('Developer Guide', 'dev_guide'),\n ),\n 'ALL_VERSIONS': all_versions,\n}\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Bokehdoc'\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bokeh', u'Bokeh Documentation',\n [u'Anaconda'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# intersphinx settings\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None)\n}\n", "path": "sphinx/source/conf.py"}, {"content": "import sys\nimport platform\nimport jinja2\nimport yaml\n\n\ndef load_setup_py_data():\n import os\n import setuptools\n os.environ['CONDA_BUILD_STATE'] = 'RENDER'\n data = {}\n\n def _setup(**kw): data.update(kw)\n setuptools.setup = _setup\n return data\n\nmeta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\nmeta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n Loader=yaml.FullLoader)\n\nsection = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\n \"deploy\" : meta_src[\"extra\"][\"deploy\"],\n \"run\" : meta_src[\"requirements\"][\"run\"],\n \"test\" : meta_src[\"test\"][\"requires\"],\n}\n\nspec = []\nfor name in sys.argv[1:]:\n spec += section[name]\n\n# bare python unpins python version causing upgrade to latest\nif 'python' in spec: spec.remove('python')\n\n# add double quotes to specs for windows, fixes #9065\nif \"windows\" in platform.platform().lower():\n spec = ['\"{}\"'.format(s) for s in spec]\n\ndeps = \"\"\ndeps += \" \".join(s for s in spec)\ndeps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\ndeps = deps.replace(' <', '<')\ndeps = deps.replace(' [unix]', ' ')\n\nprint(deps)\n", "path": "scripts/deps.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom os.path import abspath, dirname, join\n\n#\n# Bokeh documentation build configuration file, created by\n# sphinx-quickstart on Sat Oct 12 23:43:03 2013.\n#\n# This 
file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.8'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'bokeh.sphinxext.bokeh_autodoc',\n 'bokeh.sphinxext.bokeh_color',\n 'bokeh.sphinxext.bokeh_enum',\n 'bokeh.sphinxext.bokeh_gallery',\n 'bokeh.sphinxext.bokeh_github',\n 'bokeh.sphinxext.bokeh_jinja',\n 'bokeh.sphinxext.bokeh_model',\n 'bokeh.sphinxext.bokeh_options',\n 'bokeh.sphinxext.bokeh_palette',\n 'bokeh.sphinxext.bokeh_palette_group',\n 'bokeh.sphinxext.bokeh_plot',\n 'bokeh.sphinxext.bokeh_prop',\n 'bokeh.sphinxext.bokeh_releases',\n 'bokeh.sphinxext.bokeh_sitemap',\n 'bokeh.sphinxext.bokehjs_content',\n 'bokeh.sphinxext.collapsible_code_block',\n]\n\nnapoleon_include_init_with_doc = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Bokeh'\ncopyright = '\u00a9 Copyright 2015-2018, Anaconda and Bokeh Contributors.'\n\n# Get the standard computed Bokeh version string to use for |version|\n# and |release|\nfrom bokeh import __version__\n\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# Check for version override (e.g. when re-deploying a previously released\n# docs, or when pushing test docs that do not have a corresponding BokehJS\n# available on CDN)\nfrom bokeh.settings import settings\nif settings.docs_version():\n version = release = settings.docs_version()\n\n# get all the versions that will appear in the version dropdown\nf = open(join(dirname(abspath(__file__)), \"all_versions.txt\"))\nall_versions = [x.strip() for x in reversed(f.readlines())]\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n#\n# NOTE: in these docs all .py script are assumed to be bokeh plot scripts!\n# with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files\nbokeh_plot_pyfile_include_dirs = ['docs']\n\n# Whether to allow builds to succeed if a Google API key is not defined and plots\n# containing \"GOOGLE_API_KEY\" are processed\nbokeh_missing_google_api_key_ok = False\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# Sort members by type\nautodoc_member_order = 'groupwise'\n\n# patterns to exclude\nexclude_patterns = ['docs/releases/*']\n\n# This would more properly be done with rst_epilog but something about\n# the combination of this with the bokeh-gallery directive breaks the build\nrst_prolog = \"\"\"\n.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`\n.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`\n.. |Document| replace:: :py:class:`~bokeh.document.Document`\n.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`\n.. |Model| replace:: :py:class:`~bokeh.model.Model`\n.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`\n.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`\n.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`\n.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`\n\n.. |field| replace:: :py:func:`~bokeh.core.properties.field`\n.. |value| replace:: :py:func:`~bokeh.core.properties.value`\n\"\"\"\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = 'bokeh_theme'\nhtml_theme_path = ['.']\n\nhtml_context = {\n 'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed\n 'DESCRIPTION': 'Bokeh visualization library, documentation site.',\n 'AUTHOR': 'Bokeh contributors',\n 'VERSION': version,\n 'NAV': (\n ('Github', '//github.com/bokeh/bokeh'),\n ),\n 'ABOUT': (\n ('Roadmap', 'https://bokeh.org/roadmap'),\n ('Team', 'https://bokeh.org/team'),\n ('Citation', 'https://bokeh.org/citation'),\n ('Contact', 'https://bokeh.org'),\n ),\n 'SOCIAL': (\n ('Contribute', 'contribute'),\n ('Discourse', 'https://discourse.bokeh.org'),\n ('Github', '//github.com/bokeh/bokeh'),\n ('Twitter', '//twitter.com/BokehPlots'),\n ),\n 'NAV_DOCS': (\n ('Installation', 'installation'),\n ('User Guide', 'user_guide'),\n ('Gallery', 'gallery'),\n ('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),\n ('Reference', 'reference'),\n ('Releases', 'releases'),\n ('Developer Guide', 'dev_guide'),\n ),\n 'ALL_VERSIONS': all_versions,\n}\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Bokehdoc'\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bokeh', u'Bokeh Documentation',\n [u'Anaconda'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# intersphinx settings\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None)\n}\n", "path": "sphinx/source/conf.py"}, {"content": "import sys\nimport platform\nimport jinja2\nimport yaml\n\n\ndef load_setup_py_data():\n import os\n import setuptools\n os.environ['CONDA_BUILD_STATE'] = 'RENDER'\n data = {}\n\n def _setup(**kw): data.update(kw)\n setuptools.setup = _setup\n return data\n\nmeta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\ntry:\n meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n Loader=yaml.FullLoader)\nexcept AttributeError as e:\n # Loader=yaml.FullLoader added in pyyaml 5.1 because of:\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # isn't available on conda for python=3.5\n # fall back to calling without loader if it isn't available\n if 'FullLoader' in repr(e):\n meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data))\n else:\n raise\n\nsection = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\n \"deploy\" : meta_src[\"extra\"][\"deploy\"],\n \"run\" : meta_src[\"requirements\"][\"run\"],\n \"test\" : meta_src[\"test\"][\"requires\"],\n}\n\nspec = []\nfor name in sys.argv[1:]:\n spec += section[name]\n\n# bare python unpins python version causing upgrade to latest\nif 'python' in spec: spec.remove('python')\n\n# add double quotes to specs for windows, fixes #9065\nif \"windows\" in platform.platform().lower():\n spec = ['\"{}\"'.format(s) for s in spec]\n\ndeps = \"\"\ndeps += \" \".join(s for s in spec)\ndeps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\ndeps = deps.replace(' <', '<')\ndeps = deps.replace(' [unix]', ' ')\n\nprint(deps)\n", "path": "scripts/deps.py"}]}
| 3,901 | 505 |
gh_patches_debug_2895
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-5072
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[cirqflow] `KeyValueExecutableSpec` should provide a `to_dict` method / override `__getitem__`
**Is your feature request related to a use case or problem? Please describe.**
`cg.KeyValueExecutableSpec` provides a nice `from_dict()` method to convert a dict into a `Tuple[Tuple[str, Any], ...]` which is hashable. This is useful when constructing the executable spec. However, using the executable spec during analysis of the results forces one to use the stored tuples, which is cumbersome.
**Describe the solution you'd like**
The class should provide a similar `to_dict` method which can convert the stored `key_value_pairs` to a dictionary and return -- which are much easier to work with. Though the method would be a simple `return dict(self.key_value_pairs)`, there might be some value in explicitly having it on the class. We can also consider providing a custom `__getitem__` method.
**What is the urgency from your perspective for this issue? Is it blocking important work?**
P1 - I need this no later than the next release (end of quarter)
cc @mpharrigan
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-google/cirq_google/workflow/quantum_executable.py`
Content:
```
1 # Copyright 2021 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Data structures for programs executable on a quantum runtime."""
16
17 import abc
18 import dataclasses
19 from dataclasses import dataclass
20 from typing import Union, Tuple, Optional, Sequence, cast, Dict, Any, List, Iterator
21
22 import cirq
23 from cirq import _compat, study
24
25
26 class ExecutableSpec(metaclass=abc.ABCMeta):
27 """Specification metadata about an executable.
28
29 Subclasses should add problem-specific fields.
30 """
31
32 executable_family: str = NotImplemented
33 """A unique name to group executables."""
34
35
36 @dataclass(frozen=True)
37 class KeyValueExecutableSpec(ExecutableSpec):
38 """A generic executable spec whose metadata is a list of key-value pairs.
39
40 The key-value pairs define an implicit data schema. Consider defining a problem-specific
41 subclass of `ExecutableSpec` instead of using this class to realize the benefits of having
42 an explicit schema.
43
44 See Also:
45 `KeyValueExecutableSpec.from_dict` will use a dictionary to populate `key_value_pairs`.
46
47 Args:
48 executable_family: A unique name to group executables.
49 key_value_pairs: A tuple of key-value pairs. The keys should be strings but the values
50 can be any immutable object.
51 """
52
53 executable_family: str
54 key_value_pairs: Tuple[Tuple[str, Any], ...] = ()
55
56 @classmethod
57 def _json_namespace_(cls) -> str:
58 return 'cirq.google'
59
60 def _json_dict_(self) -> Dict[str, Any]:
61 return cirq.dataclass_json_dict(self)
62
63 @classmethod
64 def from_dict(cls, d: Dict[str, Any], *, executable_family: str) -> 'KeyValueExecutableSpec':
65 return cls(
66 executable_family=executable_family,
67 key_value_pairs=tuple((k, v) for k, v in d.items()),
68 )
69
70 @classmethod
71 def _from_json_dict_(
72 cls, executable_family: str, key_value_pairs: List[List[Union[str, Any]]], **kwargs
73 ) -> 'KeyValueExecutableSpec':
74 return cls(
75 executable_family=executable_family,
76 key_value_pairs=tuple((k, v) for k, v in key_value_pairs),
77 )
78
79 def __repr__(self) -> str:
80 return cirq._compat.dataclass_repr(self, namespace='cirq_google')
81
82
83 @dataclass(frozen=True)
84 class BitstringsMeasurement:
85 """Use in-circuit MeasurementGate to collect many repetitions of strings of bits.
86
87 This is the lowest-level measurement type allowed in `QuantumExecutable` and behaves
88 identically to the `cirq.Sampler.run` function. The executable's circuit must contain
89 explicit measurement gates.
90
91 Args:
92 n_repeitions: The number of repetitions to execute the circuit.
93 """
94
95 n_repetitions: int
96
97 @classmethod
98 def _json_namespace_(cls) -> str:
99 return 'cirq.google'
100
101 def _json_dict_(self):
102 return cirq.dataclass_json_dict(self)
103
104 def __repr__(self):
105 return cirq._compat.dataclass_repr(self, namespace='cirq_google')
106
107
108 TParamPair = Tuple[cirq.TParamKey, cirq.TParamVal]
109
110
111 @dataclass(frozen=True)
112 class QuantumExecutable:
113 """An executable quantum program.
114
115 This serves a similar purpose to `cirq.Circuit` with some key differences. First, a quantum
116 executable contains all the relevant context for execution including parameters as well as
117 the desired number of repetitions. Second, this object is immutable. Finally, there are
118 optional fields enabling a higher level of abstraction for certain aspects of the executable.
119
120 Attributes:
121 circuit: A `cirq.Circuit` describing the quantum operations to execute.
122 measurement: A description of the measurement properties or process.
123 params: An immutable `cirq.ParamResolver` (or similar type). It's representation is
124 normalized to a tuple of key value pairs.
125 spec: Optional `cg.ExecutableSpec` containing metadata about this executable that is not
126 used by the quantum runtime, but will be forwarded to all downstream result objects.
127 problem_topology: Optional `cirq.NamedTopology` instance specifying the topology of the
128 circuit. This is useful when optimizing on-device layout. If none is provided we
129 assume `circuit` already has a valid on-device layout.
130 initial_state: A `cirq.ProductState` specifying the desired initial state before executing
131 `circuit`. If not specified, default to the all-zeros state.
132 """
133
134 circuit: cirq.FrozenCircuit
135 measurement: BitstringsMeasurement
136 params: Optional[Tuple[TParamPair, ...]] = None
137 spec: Optional[ExecutableSpec] = None
138 problem_topology: Optional[cirq.NamedTopology] = None
139 initial_state: Optional[cirq.ProductState] = None
140
141 # pylint: disable=missing-raises-doc
142 def __init__(
143 self,
144 circuit: cirq.AbstractCircuit,
145 measurement: BitstringsMeasurement,
146 params: Union[Sequence[TParamPair], cirq.ParamResolverOrSimilarType] = None,
147 spec: Optional[ExecutableSpec] = None,
148 problem_topology: Optional[cirq.NamedTopology] = None,
149 initial_state: Optional[cirq.ProductState] = None,
150 ):
151 """Initialize the quantum executable.
152
153 The actual fields in this class are immutable, but we allow more liberal input types
154 which will be frozen in this __init__ method.
155
156 Args:
157 circuit: The circuit. This will be frozen before being set as an attribute.
158 measurement: A description of the measurement properties or process.
159 params: A cirq.ParamResolverOrSimilarType which will be frozen into a tuple of
160 key value pairs.
161 spec: Specification metadata about this executable that is not used by the quantum
162 runtime, but is persisted in result objects to associate executables with results.
163 problem_topology: Description of the multiqubit gate topology present in the circuit.
164 If not specified, the circuit must be compatible with the device topology.
165 initial_state: How to initialize the quantum system before running `circuit`. If not
166 specified, the device will be initialized into the all-zeros state.
167 """
168
169 # We care a lot about mutability in this class. No object is truly immutable in Python,
170 # but we can get pretty close by following the example of dataclass(frozen=True), which
171 # deletes this class's __setattr__ magic method. To set values ever, we use
172 # object.__setattr__ in this __init__ function.
173 #
174 # We write our own __init__ function to be able to accept a wider range of input formats
175 # that can be easily converted to our native, immutable format.
176 object.__setattr__(self, 'circuit', circuit.freeze())
177 object.__setattr__(self, 'measurement', measurement)
178
179 if isinstance(params, tuple) and all(
180 isinstance(param_kv, tuple) and len(param_kv) == 2 for param_kv in params
181 ):
182 frozen_params = params
183 elif isinstance(params, Sequence) and all(
184 isinstance(param_kv, Sequence) and len(param_kv) == 2 for param_kv in params
185 ):
186 frozen_params = tuple((k, v) for k, v in params)
187 elif study.resolver._is_param_resolver_or_similar_type(params):
188 param_resolver = cirq.ParamResolver(cast(cirq.ParamResolverOrSimilarType, params))
189 frozen_params = tuple(param_resolver.param_dict.items())
190 else:
191 raise ValueError(f"`params` should be a ParamResolverOrSimilarType, not {params}.")
192 object.__setattr__(self, 'params', frozen_params)
193
194 object.__setattr__(self, 'spec', spec)
195 object.__setattr__(self, 'problem_topology', problem_topology)
196 object.__setattr__(self, 'initial_state', initial_state)
197
198 # Hash may be expensive to compute, especially for large circuits.
199 # This should be safe since this class should be immutable. This line will
200 # also check for hashibility of members at construction time.
201 object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))
202
203 def __str__(self):
204 return f'QuantumExecutable(spec={self.spec})'
205
206 def __repr__(self):
207 return _compat.dataclass_repr(self, namespace='cirq_google')
208
209 @classmethod
210 def _json_namespace_(cls) -> str:
211 return 'cirq.google'
212
213 def _json_dict_(self):
214 return cirq.dataclass_json_dict(self)
215
216
217 @dataclass(frozen=True)
218 class QuantumExecutableGroup:
219 """A collection of `QuantumExecutable`s.
220
221 Attributes:
222 executables: A tuple of `cg.QuantumExecutable`.
223 """
224
225 executables: Tuple[QuantumExecutable, ...]
226
227 def __init__(
228 self,
229 executables: Sequence[QuantumExecutable],
230 ):
231 """Initialize and normalize the quantum executable group.
232
233 Args:
234 executables: A sequence of `cg.QuantumExecutable` which will be frozen into a
235 tuple.
236 """
237
238 if not isinstance(executables, tuple):
239 executables = tuple(executables)
240 object.__setattr__(self, 'executables', executables)
241
242 object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))
243
244 def __len__(self) -> int:
245 return len(self.executables)
246
247 def __iter__(self) -> Iterator[QuantumExecutable]:
248 yield from self.executables
249
250 def __str__(self) -> str:
251 exe_str = ', '.join(str(exe) for exe in self.executables[:2])
252 if len(self.executables) > 2:
253 exe_str += ', ...'
254
255 return f'QuantumExecutableGroup(executables=[{exe_str}])'
256
257 def __repr__(self) -> str:
258 return _compat.dataclass_repr(self, namespace='cirq_google')
259
260 def __hash__(self) -> int:
261 return self._hash # type: ignore
262
263 @classmethod
264 def _json_namespace_(cls) -> str:
265 return 'cirq.google'
266
267 def _json_dict_(self) -> Dict[str, Any]:
268 return cirq.dataclass_json_dict(self)
269
```
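
The comments in `QuantumExecutable.__init__` above describe a frozen-dataclass pattern: a hand-written `__init__` that normalizes liberal inputs and sets fields with `object.__setattr__`, since `frozen=True` removes the normal `__setattr__`. A minimal self-contained sketch of that pattern (class and field names are illustrative only, not part of Cirq):

```python
from dataclasses import dataclass
from typing import Tuple

@dataclass(frozen=True)
class FrozenParams:
    """Frozen dataclass that still normalizes liberal inputs in a custom __init__."""
    pairs: Tuple[Tuple[str, float], ...]

    def __init__(self, pairs):
        # dataclass() does not overwrite an __init__ defined in the class body,
        # and frozen=True blocks normal attribute assignment, so we bypass it.
        object.__setattr__(
            self, 'pairs', tuple((str(k), float(v)) for k, v in pairs)
        )

p = FrozenParams([('theta', 1), ('phi', 0.5)])
assert p.pairs == (('theta', 1.0), ('phi', 0.5))
```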
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq-google/cirq_google/workflow/quantum_executable.py b/cirq-google/cirq_google/workflow/quantum_executable.py
--- a/cirq-google/cirq_google/workflow/quantum_executable.py
+++ b/cirq-google/cirq_google/workflow/quantum_executable.py
@@ -53,6 +53,9 @@
executable_family: str
key_value_pairs: Tuple[Tuple[str, Any], ...] = ()
+ def to_dict(self) -> Dict[str, Any]:
+ return dict(self.key_value_pairs)
+
@classmethod
def _json_namespace_(cls) -> str:
return 'cirq.google'
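
A short usage sketch of the `to_dict` helper added in the hunk above; the dictionary keys and the `executable_family` value are hypothetical, and the snippet assumes a `cirq-google` build that includes this patch:

```python
import cirq_google as cg

# Build a spec from a plain dict, then recover the dict with the new to_dict().
spec = cg.KeyValueExecutableSpec.from_dict(
    {'name': 'example_problem', 'depth': 2},
    executable_family='cirq_google.example',  # hypothetical family name
)
assert spec.to_dict() == {'name': 'example_problem', 'depth': 2}
```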
|
{"golden_diff": "diff --git a/cirq-google/cirq_google/workflow/quantum_executable.py b/cirq-google/cirq_google/workflow/quantum_executable.py\n--- a/cirq-google/cirq_google/workflow/quantum_executable.py\n+++ b/cirq-google/cirq_google/workflow/quantum_executable.py\n@@ -53,6 +53,9 @@\n executable_family: str\n key_value_pairs: Tuple[Tuple[str, Any], ...] = ()\n \n+ def to_dict(self) -> Dict[str, Any]:\n+ return dict(self.key_value_pairs)\n+\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n", "issue": "[cirqflow] `KeyValueExecutableSpec` should provide a `to_dict` method / override `__getitem__`\n**Is your feature request related to a use case or problem? Please describe.**\r\n`cg.KeyValueExecutableSpec` provides a nice `from_dict()` method to convert a dict into a `Tuple[Tuple[str, Any], ...]` which is hashable. This is useful when constructing the executable spec. However, using the executable spec during analysis of the results forces one to use the stored tuples, which is cumbersome.\r\n\r\n**Describe the solution you'd like**\r\nThe class should provide a similar `to_dict` method which can convert the stored `key_value_pairs` to a dictionary and return -- which are much easier to work with. Though the method would be a simple `return dict(self.key_value_pairs)`, there might be some value in explicitly having it on the class. We can also consider providing a custom `__getitem__` method. \r\n\r\n**What is the urgency from your perspective for this issue? Is it blocking important work?**\r\nP1 - I need this no later than the next release (end of quarter)\r\n\r\ncc @mpharrigan \n", "before_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data structures for programs executable on a quantum runtime.\"\"\"\n\nimport abc\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Union, Tuple, Optional, Sequence, cast, Dict, Any, List, Iterator\n\nimport cirq\nfrom cirq import _compat, study\n\n\nclass ExecutableSpec(metaclass=abc.ABCMeta):\n \"\"\"Specification metadata about an executable.\n\n Subclasses should add problem-specific fields.\n \"\"\"\n\n executable_family: str = NotImplemented\n \"\"\"A unique name to group executables.\"\"\"\n\n\n@dataclass(frozen=True)\nclass KeyValueExecutableSpec(ExecutableSpec):\n \"\"\"A generic executable spec whose metadata is a list of key-value pairs.\n\n The key-value pairs define an implicit data schema. Consider defining a problem-specific\n subclass of `ExecutableSpec` instead of using this class to realize the benefits of having\n an explicit schema.\n\n See Also:\n `KeyValueExecutableSpec.from_dict` will use a dictionary to populate `key_value_pairs`.\n\n Args:\n executable_family: A unique name to group executables.\n key_value_pairs: A tuple of key-value pairs. The keys should be strings but the values\n can be any immutable object.\n \"\"\"\n\n executable_family: str\n key_value_pairs: Tuple[Tuple[str, Any], ...] 
= ()\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any], *, executable_family: str) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in d.items()),\n )\n\n @classmethod\n def _from_json_dict_(\n cls, executable_family: str, key_value_pairs: List[List[Union[str, Any]]], **kwargs\n ) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in key_value_pairs),\n )\n\n def __repr__(self) -> str:\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\n@dataclass(frozen=True)\nclass BitstringsMeasurement:\n \"\"\"Use in-circuit MeasurementGate to collect many repetitions of strings of bits.\n\n This is the lowest-level measurement type allowed in `QuantumExecutable` and behaves\n identically to the `cirq.Sampler.run` function. The executable's circuit must contain\n explicit measurement gates.\n\n Args:\n n_repeitions: The number of repetitions to execute the circuit.\n \"\"\"\n\n n_repetitions: int\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n def __repr__(self):\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\nTParamPair = Tuple[cirq.TParamKey, cirq.TParamVal]\n\n\n@dataclass(frozen=True)\nclass QuantumExecutable:\n \"\"\"An executable quantum program.\n\n This serves a similar purpose to `cirq.Circuit` with some key differences. First, a quantum\n executable contains all the relevant context for execution including parameters as well as\n the desired number of repetitions. Second, this object is immutable. Finally, there are\n optional fields enabling a higher level of abstraction for certain aspects of the executable.\n\n Attributes:\n circuit: A `cirq.Circuit` describing the quantum operations to execute.\n measurement: A description of the measurement properties or process.\n params: An immutable `cirq.ParamResolver` (or similar type). It's representation is\n normalized to a tuple of key value pairs.\n spec: Optional `cg.ExecutableSpec` containing metadata about this executable that is not\n used by the quantum runtime, but will be forwarded to all downstream result objects.\n problem_topology: Optional `cirq.NamedTopology` instance specifying the topology of the\n circuit. This is useful when optimizing on-device layout. If none is provided we\n assume `circuit` already has a valid on-device layout.\n initial_state: A `cirq.ProductState` specifying the desired initial state before executing\n `circuit`. 
If not specified, default to the all-zeros state.\n \"\"\"\n\n circuit: cirq.FrozenCircuit\n measurement: BitstringsMeasurement\n params: Optional[Tuple[TParamPair, ...]] = None\n spec: Optional[ExecutableSpec] = None\n problem_topology: Optional[cirq.NamedTopology] = None\n initial_state: Optional[cirq.ProductState] = None\n\n # pylint: disable=missing-raises-doc\n def __init__(\n self,\n circuit: cirq.AbstractCircuit,\n measurement: BitstringsMeasurement,\n params: Union[Sequence[TParamPair], cirq.ParamResolverOrSimilarType] = None,\n spec: Optional[ExecutableSpec] = None,\n problem_topology: Optional[cirq.NamedTopology] = None,\n initial_state: Optional[cirq.ProductState] = None,\n ):\n \"\"\"Initialize the quantum executable.\n\n The actual fields in this class are immutable, but we allow more liberal input types\n which will be frozen in this __init__ method.\n\n Args:\n circuit: The circuit. This will be frozen before being set as an attribute.\n measurement: A description of the measurement properties or process.\n params: A cirq.ParamResolverOrSimilarType which will be frozen into a tuple of\n key value pairs.\n spec: Specification metadata about this executable that is not used by the quantum\n runtime, but is persisted in result objects to associate executables with results.\n problem_topology: Description of the multiqubit gate topology present in the circuit.\n If not specified, the circuit must be compatible with the device topology.\n initial_state: How to initialize the quantum system before running `circuit`. If not\n specified, the device will be initialized into the all-zeros state.\n \"\"\"\n\n # We care a lot about mutability in this class. No object is truly immutable in Python,\n # but we can get pretty close by following the example of dataclass(frozen=True), which\n # deletes this class's __setattr__ magic method. To set values ever, we use\n # object.__setattr__ in this __init__ function.\n #\n # We write our own __init__ function to be able to accept a wider range of input formats\n # that can be easily converted to our native, immutable format.\n object.__setattr__(self, 'circuit', circuit.freeze())\n object.__setattr__(self, 'measurement', measurement)\n\n if isinstance(params, tuple) and all(\n isinstance(param_kv, tuple) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = params\n elif isinstance(params, Sequence) and all(\n isinstance(param_kv, Sequence) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = tuple((k, v) for k, v in params)\n elif study.resolver._is_param_resolver_or_similar_type(params):\n param_resolver = cirq.ParamResolver(cast(cirq.ParamResolverOrSimilarType, params))\n frozen_params = tuple(param_resolver.param_dict.items())\n else:\n raise ValueError(f\"`params` should be a ParamResolverOrSimilarType, not {params}.\")\n object.__setattr__(self, 'params', frozen_params)\n\n object.__setattr__(self, 'spec', spec)\n object.__setattr__(self, 'problem_topology', problem_topology)\n object.__setattr__(self, 'initial_state', initial_state)\n\n # Hash may be expensive to compute, especially for large circuits.\n # This should be safe since this class should be immutable. 
This line will\n # also check for hashibility of members at construction time.\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __str__(self):\n return f'QuantumExecutable(spec={self.spec})'\n\n def __repr__(self):\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n\n@dataclass(frozen=True)\nclass QuantumExecutableGroup:\n \"\"\"A collection of `QuantumExecutable`s.\n\n Attributes:\n executables: A tuple of `cg.QuantumExecutable`.\n \"\"\"\n\n executables: Tuple[QuantumExecutable, ...]\n\n def __init__(\n self,\n executables: Sequence[QuantumExecutable],\n ):\n \"\"\"Initialize and normalize the quantum executable group.\n\n Args:\n executables: A sequence of `cg.QuantumExecutable` which will be frozen into a\n tuple.\n \"\"\"\n\n if not isinstance(executables, tuple):\n executables = tuple(executables)\n object.__setattr__(self, 'executables', executables)\n\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __len__(self) -> int:\n return len(self.executables)\n\n def __iter__(self) -> Iterator[QuantumExecutable]:\n yield from self.executables\n\n def __str__(self) -> str:\n exe_str = ', '.join(str(exe) for exe in self.executables[:2])\n if len(self.executables) > 2:\n exe_str += ', ...'\n\n return f'QuantumExecutableGroup(executables=[{exe_str}])'\n\n def __repr__(self) -> str:\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n def __hash__(self) -> int:\n return self._hash # type: ignore\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n", "path": "cirq-google/cirq_google/workflow/quantum_executable.py"}], "after_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data structures for programs executable on a quantum runtime.\"\"\"\n\nimport abc\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Union, Tuple, Optional, Sequence, cast, Dict, Any, List, Iterator\n\nimport cirq\nfrom cirq import _compat, study\n\n\nclass ExecutableSpec(metaclass=abc.ABCMeta):\n \"\"\"Specification metadata about an executable.\n\n Subclasses should add problem-specific fields.\n \"\"\"\n\n executable_family: str = NotImplemented\n \"\"\"A unique name to group executables.\"\"\"\n\n\n@dataclass(frozen=True)\nclass KeyValueExecutableSpec(ExecutableSpec):\n \"\"\"A generic executable spec whose metadata is a list of key-value pairs.\n\n The key-value pairs define an implicit data schema. 
Consider defining a problem-specific\n subclass of `ExecutableSpec` instead of using this class to realize the benefits of having\n an explicit schema.\n\n See Also:\n `KeyValueExecutableSpec.from_dict` will use a dictionary to populate `key_value_pairs`.\n\n Args:\n executable_family: A unique name to group executables.\n key_value_pairs: A tuple of key-value pairs. The keys should be strings but the values\n can be any immutable object.\n \"\"\"\n\n executable_family: str\n key_value_pairs: Tuple[Tuple[str, Any], ...] = ()\n\n def to_dict(self) -> Dict[str, Any]:\n return dict(self.key_value_pairs)\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any], *, executable_family: str) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in d.items()),\n )\n\n @classmethod\n def _from_json_dict_(\n cls, executable_family: str, key_value_pairs: List[List[Union[str, Any]]], **kwargs\n ) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in key_value_pairs),\n )\n\n def __repr__(self) -> str:\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\n@dataclass(frozen=True)\nclass BitstringsMeasurement:\n \"\"\"Use in-circuit MeasurementGate to collect many repetitions of strings of bits.\n\n This is the lowest-level measurement type allowed in `QuantumExecutable` and behaves\n identically to the `cirq.Sampler.run` function. The executable's circuit must contain\n explicit measurement gates.\n\n Args:\n n_repeitions: The number of repetitions to execute the circuit.\n \"\"\"\n\n n_repetitions: int\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n def __repr__(self):\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\nTParamPair = Tuple[cirq.TParamKey, cirq.TParamVal]\n\n\n@dataclass(frozen=True)\nclass QuantumExecutable:\n \"\"\"An executable quantum program.\n\n This serves a similar purpose to `cirq.Circuit` with some key differences. First, a quantum\n executable contains all the relevant context for execution including parameters as well as\n the desired number of repetitions. Second, this object is immutable. Finally, there are\n optional fields enabling a higher level of abstraction for certain aspects of the executable.\n\n Attributes:\n circuit: A `cirq.Circuit` describing the quantum operations to execute.\n measurement: A description of the measurement properties or process.\n params: An immutable `cirq.ParamResolver` (or similar type). It's representation is\n normalized to a tuple of key value pairs.\n spec: Optional `cg.ExecutableSpec` containing metadata about this executable that is not\n used by the quantum runtime, but will be forwarded to all downstream result objects.\n problem_topology: Optional `cirq.NamedTopology` instance specifying the topology of the\n circuit. This is useful when optimizing on-device layout. If none is provided we\n assume `circuit` already has a valid on-device layout.\n initial_state: A `cirq.ProductState` specifying the desired initial state before executing\n `circuit`. 
If not specified, default to the all-zeros state.\n \"\"\"\n\n circuit: cirq.FrozenCircuit\n measurement: BitstringsMeasurement\n params: Optional[Tuple[TParamPair, ...]] = None\n spec: Optional[ExecutableSpec] = None\n problem_topology: Optional[cirq.NamedTopology] = None\n initial_state: Optional[cirq.ProductState] = None\n\n # pylint: disable=missing-raises-doc\n def __init__(\n self,\n circuit: cirq.AbstractCircuit,\n measurement: BitstringsMeasurement,\n params: Union[Sequence[TParamPair], cirq.ParamResolverOrSimilarType] = None,\n spec: Optional[ExecutableSpec] = None,\n problem_topology: Optional[cirq.NamedTopology] = None,\n initial_state: Optional[cirq.ProductState] = None,\n ):\n \"\"\"Initialize the quantum executable.\n\n The actual fields in this class are immutable, but we allow more liberal input types\n which will be frozen in this __init__ method.\n\n Args:\n circuit: The circuit. This will be frozen before being set as an attribute.\n measurement: A description of the measurement properties or process.\n params: A cirq.ParamResolverOrSimilarType which will be frozen into a tuple of\n key value pairs.\n spec: Specification metadata about this executable that is not used by the quantum\n runtime, but is persisted in result objects to associate executables with results.\n problem_topology: Description of the multiqubit gate topology present in the circuit.\n If not specified, the circuit must be compatible with the device topology.\n initial_state: How to initialize the quantum system before running `circuit`. If not\n specified, the device will be initialized into the all-zeros state.\n \"\"\"\n\n # We care a lot about mutability in this class. No object is truly immutable in Python,\n # but we can get pretty close by following the example of dataclass(frozen=True), which\n # deletes this class's __setattr__ magic method. To set values ever, we use\n # object.__setattr__ in this __init__ function.\n #\n # We write our own __init__ function to be able to accept a wider range of input formats\n # that can be easily converted to our native, immutable format.\n object.__setattr__(self, 'circuit', circuit.freeze())\n object.__setattr__(self, 'measurement', measurement)\n\n if isinstance(params, tuple) and all(\n isinstance(param_kv, tuple) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = params\n elif isinstance(params, Sequence) and all(\n isinstance(param_kv, Sequence) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = tuple((k, v) for k, v in params)\n elif study.resolver._is_param_resolver_or_similar_type(params):\n param_resolver = cirq.ParamResolver(cast(cirq.ParamResolverOrSimilarType, params))\n frozen_params = tuple(param_resolver.param_dict.items())\n else:\n raise ValueError(f\"`params` should be a ParamResolverOrSimilarType, not {params}.\")\n object.__setattr__(self, 'params', frozen_params)\n\n object.__setattr__(self, 'spec', spec)\n object.__setattr__(self, 'problem_topology', problem_topology)\n object.__setattr__(self, 'initial_state', initial_state)\n\n # Hash may be expensive to compute, especially for large circuits.\n # This should be safe since this class should be immutable. 
This line will\n # also check for hashibility of members at construction time.\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __str__(self):\n return f'QuantumExecutable(spec={self.spec})'\n\n def __repr__(self):\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n\n@dataclass(frozen=True)\nclass QuantumExecutableGroup:\n \"\"\"A collection of `QuantumExecutable`s.\n\n Attributes:\n executables: A tuple of `cg.QuantumExecutable`.\n \"\"\"\n\n executables: Tuple[QuantumExecutable, ...]\n\n def __init__(\n self,\n executables: Sequence[QuantumExecutable],\n ):\n \"\"\"Initialize and normalize the quantum executable group.\n\n Args:\n executables: A sequence of `cg.QuantumExecutable` which will be frozen into a\n tuple.\n \"\"\"\n\n if not isinstance(executables, tuple):\n executables = tuple(executables)\n object.__setattr__(self, 'executables', executables)\n\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __len__(self) -> int:\n return len(self.executables)\n\n def __iter__(self) -> Iterator[QuantumExecutable]:\n yield from self.executables\n\n def __str__(self) -> str:\n exe_str = ', '.join(str(exe) for exe in self.executables[:2])\n if len(self.executables) > 2:\n exe_str += ', ...'\n\n return f'QuantumExecutableGroup(executables=[{exe_str}])'\n\n def __repr__(self) -> str:\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n def __hash__(self) -> int:\n return self._hash # type: ignore\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n", "path": "cirq-google/cirq_google/workflow/quantum_executable.py"}]}
| 3,558 | 148 |
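The docstrings in the record above describe how a `QuantumExecutable` bundles a frozen circuit, a bitstring measurement, parameters and optional metadata into a single immutable object. The sketch below shows a plausible construction; it assumes the classes are re-exported from `cirq_google` (as the `cg.` references in those docstrings suggest), and the qubits, the `theta` parameter and the `example.family` string are invented for illustration:

```python
import sympy
import cirq
import cirq_google as cg

q0, q1 = cirq.LineQubit.range(2)
theta = sympy.Symbol('theta')

# BitstringsMeasurement requires explicit measurement gates in the circuit.
circuit = cirq.Circuit(
    cirq.rz(theta).on(q0),
    cirq.CNOT(q0, q1),
    cirq.measure(q0, q1, key='m'),
)

exe = cg.QuantumExecutable(
    circuit=circuit,                                   # frozen by __init__
    measurement=cg.BitstringsMeasurement(n_repetitions=1000),
    params=[(theta, 0.1)],                             # normalized to a tuple of key/value pairs
    spec=cg.KeyValueExecutableSpec.from_dict(
        {'name': 'bell_pair'}, executable_family='example.family'
    ),
)
group = cg.QuantumExecutableGroup([exe])
print(group)  # QuantumExecutableGroup(executables=[QuantumExecutable(spec=...)])
```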
gh_patches_debug_2612
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-307
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add --version flag to pyhf CLI
# Description
As [suggested by Lukas](https://github.com/diana-hep/pyhf/pull/304#issuecomment-428856809), adding a `--version` flag to the pyhf CLI could be useful.
--- END ISSUE ---
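For reference, `click` provides a built-in decorator for exactly this. The snippet below is a minimal, self-contained sketch of the general pattern (a toy `cli` group with a placeholder `__version__` string, not pyhf's actual module layout):

```python
import click

__version__ = '0.1.0'  # placeholder; pyhf would import this from its own version module


@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(version=__version__)
def cli():
    """Toy command group showing click's built-in --version handling."""


if __name__ == '__main__':
    cli()
```

Invoking the script with `--version` prints the version string and exits immediately, which is the behaviour requested above.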
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyhf/commandline.py`
Content:
```
1 import logging
2 logging.basicConfig()
3 log = logging.getLogger(__name__)
4
5 import click
6 import json
7 import os
8 import jsonpatch
9 import sys
10
11 from . import readxml
12 from . import writexml
13 from .utils import runOnePoint
14 from .pdf import Model
15
16
17 @click.group(context_settings=dict(help_option_names=['-h', '--help']))
18 def pyhf():
19 pass
20
21 @pyhf.command()
22 @click.argument('entrypoint-xml', type=click.Path(exists=True))
23 @click.option('--basedir', help='The base directory for the XML files to point relative to.', type=click.Path(exists=True), default=os.getcwd())
24 @click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
25 @click.option('--track-progress/--hide-progress', default=True)
26 def xml2json(entrypoint_xml, basedir, output_file, track_progress):
27 """ Entrypoint XML: The top-level XML file for the PDF definition. """
28 spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)
29 if output_file is None:
30 print(json.dumps(spec, indent=4, sort_keys=True))
31 else:
32 with open(output_file, 'w+') as out_file:
33 json.dump(spec, out_file, indent=4, sort_keys=True)
34 log.debug("Written to {0:s}".format(output_file))
35 sys.exit(0)
36
37 @pyhf.command()
38 @click.argument('workspace', default='-')
39 @click.argument('xmlfile', default='-')
40 @click.option('--specroot', default=click.Path(exists=True))
41 @click.option('--dataroot', default=click.Path(exists=True))
42 def json2xml(workspace, xmlfile, specroot, dataroot):
43 with click.open_file(workspace, 'r') as specstream:
44 d = json.load(specstream)
45 with click.open_file(xmlfile, 'w') as outstream:
46 outstream.write(writexml.writexml(d, specroot, dataroot,'').decode('utf-8'))
47 sys.exit(0)
48
49 @pyhf.command()
50 @click.argument('workspace', default='-')
51 @click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
52 @click.option('--measurement', default=None)
53 @click.option('-p', '--patch', multiple=True)
54 @click.option('--qualify-names/--no-qualify-names', default=False)
55 def cls(workspace, output_file, measurement, qualify_names, patch):
56 with click.open_file(workspace, 'r') as specstream:
57 d = json.load(specstream)
58 measurements = d['toplvl']['measurements']
59 measurement_names = [m['name'] for m in measurements]
60 measurement_index = 0
61 log.debug('measurements defined:\n\t{0:s}'.format('\n\t'.join(measurement_names)))
62 if measurement and measurement not in measurement_names:
63 log.error('no measurement by name \'{0:s}\' exists, pick from one of the valid ones above'.format(measurement))
64 sys.exit(1)
65 else:
66 if not measurement and len(measurements) > 1:
67 log.warning('multiple measurements defined. Taking the first measurement.')
68 measurement_index = 0
69 elif measurement:
70 measurement_index = measurement_names.index(measurement)
71
72 log.debug('calculating CLs for measurement {0:s}'.format(measurements[measurement_index]['name']))
73 spec = {'channels':d['channels']}
74 for p in patch:
75 with click.open_file(p, 'r') as read_file:
76 p = jsonpatch.JsonPatch(json.loads(read_file.read()))
77 spec = p.apply(spec)
78 p = Model(spec, poiname=measurements[measurement_index]['config']['poi'], qualify_names=qualify_names)
79 result = runOnePoint(1.0, sum((d['data'][c['name']] for c in d['channels']),[]) + p.config.auxdata, p)
80 result = {'CLs_obs': result[-2].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}
81 if output_file is None:
82 print(json.dumps(result, indent=4, sort_keys=True))
83 else:
84 with open(output_file, 'w+') as out_file:
85 json.dump(result, out_file, indent=4, sort_keys=True)
86 log.debug("Written to {0:s}".format(output_file))
87 sys.exit(0)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyhf/commandline.py b/pyhf/commandline.py
--- a/pyhf/commandline.py
+++ b/pyhf/commandline.py
@@ -12,9 +12,11 @@
from . import writexml
from .utils import runOnePoint
from .pdf import Model
+from .version import __version__
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
+@click.version_option(version=__version__)
def pyhf():
pass
|
{"golden_diff": "diff --git a/pyhf/commandline.py b/pyhf/commandline.py\n--- a/pyhf/commandline.py\n+++ b/pyhf/commandline.py\n@@ -12,9 +12,11 @@\n from . import writexml\n from .utils import runOnePoint\n from .pdf import Model\n+from .version import __version__\n \n \n @click.group(context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\n def pyhf():\n pass\n", "issue": "Add --version flag to pyhf CLI\n# Description\r\n\r\nAs [suggested by Lukas](https://github.com/diana-hep/pyhf/pull/304#issuecomment-428856809), adding a `--version` flag to the pyhf CLI could be useful.\n", "before_files": [{"content": "import logging\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\nimport click\nimport json\nimport os\nimport jsonpatch\nimport sys\n\nfrom . import readxml\nfrom . import writexml\nfrom .utils import runOnePoint\nfrom .pdf import Model\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\ndef pyhf():\n pass\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected]('--basedir', help='The base directory for the XML files to point relative to.', type=click.Path(exists=True), default=os.getcwd())\[email protected]('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n sys.exit(0)\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('xmlfile', default='-')\[email protected]('--specroot', default=click.Path(exists=True))\[email protected]('--dataroot', default=click.Path(exists=True))\ndef json2xml(workspace, xmlfile, specroot, dataroot):\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n with click.open_file(xmlfile, 'w') as outstream:\n outstream.write(writexml.writexml(d, specroot, dataroot,'').decode('utf-8'))\n sys.exit(0)\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--qualify-names/--no-qualify-names', default=False)\ndef cls(workspace, output_file, measurement, qualify_names, patch):\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n measurements = d['toplvl']['measurements']\n measurement_names = [m['name'] for m in measurements]\n measurement_index = 0\n log.debug('measurements defined:\\n\\t{0:s}'.format('\\n\\t'.join(measurement_names)))\n if measurement and measurement not in measurement_names:\n log.error('no measurement by name \\'{0:s}\\' exists, pick from one of the valid ones above'.format(measurement))\n sys.exit(1)\n else:\n if not measurement and len(measurements) > 1:\n log.warning('multiple measurements defined. 
Taking the first measurement.')\n measurement_index = 0\n elif measurement:\n measurement_index = measurement_names.index(measurement)\n\n log.debug('calculating CLs for measurement {0:s}'.format(measurements[measurement_index]['name']))\n spec = {'channels':d['channels']}\n for p in patch:\n with click.open_file(p, 'r') as read_file:\n p = jsonpatch.JsonPatch(json.loads(read_file.read()))\n spec = p.apply(spec)\n p = Model(spec, poiname=measurements[measurement_index]['config']['poi'], qualify_names=qualify_names)\n result = runOnePoint(1.0, sum((d['data'][c['name']] for c in d['channels']),[]) + p.config.auxdata, p)\n result = {'CLs_obs': result[-2].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n sys.exit(0)\n", "path": "pyhf/commandline.py"}], "after_files": [{"content": "import logging\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\nimport click\nimport json\nimport os\nimport jsonpatch\nimport sys\n\nfrom . import readxml\nfrom . import writexml\nfrom .utils import runOnePoint\nfrom .pdf import Model\nfrom .version import __version__\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\ndef pyhf():\n pass\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected]('--basedir', help='The base directory for the XML files to point relative to.', type=click.Path(exists=True), default=os.getcwd())\[email protected]('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n sys.exit(0)\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('xmlfile', default='-')\[email protected]('--specroot', default=click.Path(exists=True))\[email protected]('--dataroot', default=click.Path(exists=True))\ndef json2xml(workspace, xmlfile, specroot, dataroot):\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n with click.open_file(xmlfile, 'w') as outstream:\n outstream.write(writexml.writexml(d, specroot, dataroot,'').decode('utf-8'))\n sys.exit(0)\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-file', help='The location of the output json file. 
If not specified, prints to screen.', default=None)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--qualify-names/--no-qualify-names', default=False)\ndef cls(workspace, output_file, measurement, qualify_names, patch):\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n measurements = d['toplvl']['measurements']\n measurement_names = [m['name'] for m in measurements]\n measurement_index = 0\n log.debug('measurements defined:\\n\\t{0:s}'.format('\\n\\t'.join(measurement_names)))\n if measurement and measurement not in measurement_names:\n log.error('no measurement by name \\'{0:s}\\' exists, pick from one of the valid ones above'.format(measurement))\n sys.exit(1)\n else:\n if not measurement and len(measurements) > 1:\n log.warning('multiple measurements defined. Taking the first measurement.')\n measurement_index = 0\n elif measurement:\n measurement_index = measurement_names.index(measurement)\n\n log.debug('calculating CLs for measurement {0:s}'.format(measurements[measurement_index]['name']))\n spec = {'channels':d['channels']}\n for p in patch:\n with click.open_file(p, 'r') as read_file:\n p = jsonpatch.JsonPatch(json.loads(read_file.read()))\n spec = p.apply(spec)\n p = Model(spec, poiname=measurements[measurement_index]['config']['poi'], qualify_names=qualify_names)\n result = runOnePoint(1.0, sum((d['data'][c['name']] for c in d['channels']),[]) + p.config.auxdata, p)\n result = {'CLs_obs': result[-2].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n sys.exit(0)\n", "path": "pyhf/commandline.py"}]}
| 1,453 | 107 |
gh_patches_debug_33199
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-1395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
poetry shell does not activate virtualenv
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: Mac OS X, High Sierra
- **Poetry version**: 0.12.5
## Issue
Similar to ```pipenv shell```, I would have expected that when running ```poetry shell``` the virtualenv gets activated, but apparently this is not the case...
```console
➜ which python
/Users/timon/.pyenv/shims/python
➜ poetry shell
Spawning shell within /Users/timon/Library/Caches/pypoetry/virtualenvs/YOLO-SAR-py3.7
➜ which python
/Users/timon/.pyenv/shims/python
➜ source /Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/activate
➜ which python
/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python
```
for comparison
```console
➜ poetry run which python
/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python
```
Am I misunderstanding something and this is expected behaviour or is it a bug?
Thanks a lot already for your time :)
--- END ISSUE ---
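One way to get the expected behaviour is to spawn an interactive shell and source the environment's `activate` script into it before handing control back to the user. The sketch below illustrates that idea with `pexpect`; the virtualenv path is copied from the report above, while the shell binary and its `-i` flag are assumptions made for illustration only:

```python
import os
import sys

import pexpect

# Assumed values for illustration; a real implementation would derive these.
venv_path = '/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7'
shell = os.environ.get('SHELL', '/bin/bash')

# Spawn an interactive shell and source the activate script into it.
child = pexpect.spawn(shell, ['-i'])
child.setecho(False)
child.sendline('. {}'.format(os.path.join(venv_path, 'bin', 'activate')))

# Hand the terminal over to the user; propagate the shell's exit status.
child.interact(escape_character=None)
child.close()
sys.exit(child.exitstatus)
```

This mirrors the approach the accepted patch takes further down in this record, where `Shell.activate` drives the subshell with `pexpect` and falls back to plain execution on Windows.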
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/utils/shell.py`
Content:
```
1 import os
2
3 from shellingham import detect_shell
4 from shellingham import ShellDetectionFailure
5
6
7 class Shell:
8 """
9 Represents the current shell.
10 """
11
12 _shell = None
13
14 def __init__(self, name, path): # type: (str, str) -> None
15 self._name = name
16 self._path = path
17
18 @property
19 def name(self): # type: () -> str
20 return self._name
21
22 @property
23 def path(self): # type: () -> str
24 return self._path
25
26 @classmethod
27 def get(cls): # type: () -> Shell
28 """
29 Retrieve the current shell.
30 """
31 if cls._shell is not None:
32 return cls._shell
33
34 try:
35 name, path = detect_shell(os.getpid())
36 except (RuntimeError, ShellDetectionFailure):
37 raise RuntimeError("Unable to detect the current shell.")
38
39 cls._shell = cls(name, path)
40
41 return cls._shell
42
43 def __repr__(self): # type: () -> str
44 return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path)
45
```
Path: `poetry/console/commands/shell.py`
Content:
```
1 import sys
2
3 from os import environ
4 from distutils.util import strtobool
5
6 from .env_command import EnvCommand
7
8
9 class ShellCommand(EnvCommand):
10
11 name = "shell"
12 description = "Spawns a shell within the virtual environment."
13
14 help = """The <info>shell</> command spawns a shell, according to the
15 <comment>$SHELL</> environment variable, within the virtual environment.
16 If one doesn't exist yet, it will be created.
17 """
18
19 def handle(self):
20 from poetry.utils.shell import Shell
21
22 # Check if it's already activated or doesn't exist and won't be created
23 venv_activated = strtobool(environ.get("POETRY_ACTIVE", "0")) or getattr(
24 sys, "real_prefix", sys.prefix
25 ) == str(self.env.path)
26 if venv_activated:
27 self.line(
28 "Virtual environment already activated: "
29 "<info>{}</>".format(self.env.path)
30 )
31
32 return
33
34 self.line("Spawning shell within <info>{}</>".format(self.env.path))
35
36 # Setting this to avoid spawning unnecessary nested shells
37 environ["POETRY_ACTIVE"] = "1"
38 shell = Shell.get()
39 self.env.execute(shell.path)
40 environ.pop("POETRY_ACTIVE")
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/poetry/console/commands/shell.py b/poetry/console/commands/shell.py
--- a/poetry/console/commands/shell.py
+++ b/poetry/console/commands/shell.py
@@ -36,5 +36,5 @@
# Setting this to avoid spawning unnecessary nested shells
environ["POETRY_ACTIVE"] = "1"
shell = Shell.get()
- self.env.execute(shell.path)
+ shell.activate(self.env)
environ.pop("POETRY_ACTIVE")
diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py
--- a/poetry/utils/shell.py
+++ b/poetry/utils/shell.py
@@ -1,8 +1,16 @@
import os
+import signal
+import sys
+import pexpect
+
+from clikit.utils.terminal import Terminal
from shellingham import detect_shell
from shellingham import ShellDetectionFailure
+from ._compat import WINDOWS
+from .env import VirtualEnv
+
class Shell:
"""
@@ -40,5 +48,51 @@
return cls._shell
+ def activate(self, env): # type: (VirtualEnv) -> None
+ if WINDOWS:
+ return env.execute(self.path)
+
+ terminal = Terminal()
+ with env.temp_environ():
+ c = pexpect.spawn(
+ self._path, ["-i"], dimensions=(terminal.height, terminal.width)
+ )
+
+ c.setecho(False)
+ activate_script = self._get_activate_script()
+ bin_dir = "Scripts" if WINDOWS else "bin"
+ activate_path = env.path / bin_dir / activate_script
+ c.sendline("{} {}".format(self._get_source_command(), activate_path))
+
+ def resize(sig, data):
+ terminal = Terminal()
+ c.setwinsize(terminal.height, terminal.width)
+
+ signal.signal(signal.SIGWINCH, resize)
+
+ # Interact with the new shell.
+ c.interact(escape_character=None)
+ c.close()
+
+ sys.exit(c.exitstatus)
+
+ def _get_activate_script(self):
+ if "fish" == self._name:
+ suffix = ".fish"
+ elif "csh" == self._name:
+ suffix = ".csh"
+ else:
+ suffix = ""
+
+ return "activate" + suffix
+
+ def _get_source_command(self):
+ if "fish" == self._name:
+ return "source"
+ elif "csh" == self._name:
+ return "source"
+
+ return "."
+
def __repr__(self): # type: () -> str
return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path)
|
{"golden_diff": "diff --git a/poetry/console/commands/shell.py b/poetry/console/commands/shell.py\n--- a/poetry/console/commands/shell.py\n+++ b/poetry/console/commands/shell.py\n@@ -36,5 +36,5 @@\n # Setting this to avoid spawning unnecessary nested shells\n environ[\"POETRY_ACTIVE\"] = \"1\"\n shell = Shell.get()\n- self.env.execute(shell.path)\n+ shell.activate(self.env)\n environ.pop(\"POETRY_ACTIVE\")\ndiff --git a/poetry/utils/shell.py b/poetry/utils/shell.py\n--- a/poetry/utils/shell.py\n+++ b/poetry/utils/shell.py\n@@ -1,8 +1,16 @@\n import os\n+import signal\n+import sys\n \n+import pexpect\n+\n+from clikit.utils.terminal import Terminal\n from shellingham import detect_shell\n from shellingham import ShellDetectionFailure\n \n+from ._compat import WINDOWS\n+from .env import VirtualEnv\n+\n \n class Shell:\n \"\"\"\n@@ -40,5 +48,51 @@\n \n return cls._shell\n \n+ def activate(self, env): # type: (VirtualEnv) -> None\n+ if WINDOWS:\n+ return env.execute(self.path)\n+\n+ terminal = Terminal()\n+ with env.temp_environ():\n+ c = pexpect.spawn(\n+ self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n+ )\n+\n+ c.setecho(False)\n+ activate_script = self._get_activate_script()\n+ bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n+ activate_path = env.path / bin_dir / activate_script\n+ c.sendline(\"{} {}\".format(self._get_source_command(), activate_path))\n+\n+ def resize(sig, data):\n+ terminal = Terminal()\n+ c.setwinsize(terminal.height, terminal.width)\n+\n+ signal.signal(signal.SIGWINCH, resize)\n+\n+ # Interact with the new shell.\n+ c.interact(escape_character=None)\n+ c.close()\n+\n+ sys.exit(c.exitstatus)\n+\n+ def _get_activate_script(self):\n+ if \"fish\" == self._name:\n+ suffix = \".fish\"\n+ elif \"csh\" == self._name:\n+ suffix = \".csh\"\n+ else:\n+ suffix = \"\"\n+\n+ return \"activate\" + suffix\n+\n+ def _get_source_command(self):\n+ if \"fish\" == self._name:\n+ return \"source\"\n+ elif \"csh\" == self._name:\n+ return \"source\"\n+\n+ return \".\"\n+\n def __repr__(self): # type: () -> str\n return '{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "issue": "poetry shell does not activate virtualenv \n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Mac OS X, High Sierra\r\n- **Poetry version**: 0.12.5\r\n\r\n## Issue\r\nSimilar to ```pipenv shell```, I would have expected that when running ```poetry shell``` the virtualenv gets activated, but apparently this is not the case...\r\n\r\n\r\n```console\r\n\u279c which python\r\n/Users/timon/.pyenv/shims/python\r\n\u279c poetry shell\r\nSpawning shell within /Users/timon/Library/Caches/pypoetry/virtualenvs/YOLO-SAR-py3.7\r\n\u279c which python\r\n/Users/timon/.pyenv/shims/python\r\n\u279c source /Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/activate\r\n\u279c which python\r\n/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python\r\n```\r\n\r\nfor comparison\r\n```console\r\n\u279c poetry run which python\r\n/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python\r\n```\r\n\r\n\r\nAm I 
misunderstanding something and this is expected behaviour or is it a bug? \r\n\r\nThanks a lot already for your time :)\n", "before_files": [{"content": "import os\n\nfrom shellingham import detect_shell\nfrom shellingham import ShellDetectionFailure\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name, path): # type: (str, str) -> None\n self._name = name\n self._path = path\n\n @property\n def name(self): # type: () -> str\n return self._name\n\n @property\n def path(self): # type: () -> str\n return self._path\n\n @classmethod\n def get(cls): # type: () -> Shell\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def __repr__(self): # type: () -> str\n return '{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "path": "poetry/utils/shell.py"}, {"content": "import sys\n\nfrom os import environ\nfrom distutils.util import strtobool\n\nfrom .env_command import EnvCommand\n\n\nclass ShellCommand(EnvCommand):\n\n name = \"shell\"\n description = \"Spawns a shell within the virtual environment.\"\n\n help = \"\"\"The <info>shell</> command spawns a shell, according to the\n<comment>$SHELL</> environment variable, within the virtual environment.\nIf one doesn't exist yet, it will be created.\n\"\"\"\n\n def handle(self):\n from poetry.utils.shell import Shell\n\n # Check if it's already activated or doesn't exist and won't be created\n venv_activated = strtobool(environ.get(\"POETRY_ACTIVE\", \"0\")) or getattr(\n sys, \"real_prefix\", sys.prefix\n ) == str(self.env.path)\n if venv_activated:\n self.line(\n \"Virtual environment already activated: \"\n \"<info>{}</>\".format(self.env.path)\n )\n\n return\n\n self.line(\"Spawning shell within <info>{}</>\".format(self.env.path))\n\n # Setting this to avoid spawning unnecessary nested shells\n environ[\"POETRY_ACTIVE\"] = \"1\"\n shell = Shell.get()\n self.env.execute(shell.path)\n environ.pop(\"POETRY_ACTIVE\")\n", "path": "poetry/console/commands/shell.py"}], "after_files": [{"content": "import os\nimport signal\nimport sys\n\nimport pexpect\n\nfrom clikit.utils.terminal import Terminal\nfrom shellingham import detect_shell\nfrom shellingham import ShellDetectionFailure\n\nfrom ._compat import WINDOWS\nfrom .env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name, path): # type: (str, str) -> None\n self._name = name\n self._path = path\n\n @property\n def name(self): # type: () -> str\n return self._name\n\n @property\n def path(self): # type: () -> str\n return self._path\n\n @classmethod\n def get(cls): # type: () -> Shell\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env): # type: (VirtualEnv) -> None\n if WINDOWS:\n return env.execute(self.path)\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n c.setecho(False)\n activate_script = 
self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n c.sendline(\"{} {}\".format(self._get_source_command(), activate_path))\n\n def resize(sig, data):\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self):\n if \"fish\" == self._name:\n suffix = \".fish\"\n elif \"csh\" == self._name:\n suffix = \".csh\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self):\n if \"fish\" == self._name:\n return \"source\"\n elif \"csh\" == self._name:\n return \"source\"\n\n return \".\"\n\n def __repr__(self): # type: () -> str\n return '{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "path": "poetry/utils/shell.py"}, {"content": "import sys\n\nfrom os import environ\nfrom distutils.util import strtobool\n\nfrom .env_command import EnvCommand\n\n\nclass ShellCommand(EnvCommand):\n\n name = \"shell\"\n description = \"Spawns a shell within the virtual environment.\"\n\n help = \"\"\"The <info>shell</> command spawns a shell, according to the\n<comment>$SHELL</> environment variable, within the virtual environment.\nIf one doesn't exist yet, it will be created.\n\"\"\"\n\n def handle(self):\n from poetry.utils.shell import Shell\n\n # Check if it's already activated or doesn't exist and won't be created\n venv_activated = strtobool(environ.get(\"POETRY_ACTIVE\", \"0\")) or getattr(\n sys, \"real_prefix\", sys.prefix\n ) == str(self.env.path)\n if venv_activated:\n self.line(\n \"Virtual environment already activated: \"\n \"<info>{}</>\".format(self.env.path)\n )\n\n return\n\n self.line(\"Spawning shell within <info>{}</>\".format(self.env.path))\n\n # Setting this to avoid spawning unnecessary nested shells\n environ[\"POETRY_ACTIVE\"] = \"1\"\n shell = Shell.get()\n shell.activate(self.env)\n environ.pop(\"POETRY_ACTIVE\")\n", "path": "poetry/console/commands/shell.py"}]}
| 1,351 | 623 |
gh_patches_debug_16933
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Title is used as a section if file is in subdirectory
Assuming I have a file at `research/stats.md` and a config line:
```
pages:
- ["research/stats.md", "Stats about Our Collection"]
```
I would assume that it would generate a top-level nav item titled "Stats about Our Collection".
In reality, it generates a section **Stats about Our Collection** with a sub-item titled **stats**.
I'm 90% sure this has to do with the logic in [nav.py](https://github.com/mkdocs/mkdocs/blob/master/mkdocs/nav.py#L212-L218) around `child_titles`.
--- END ISSUE ---
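The behaviour is straightforward to reproduce against the navigation helper quoted below. The snippet is an illustrative sketch; the `mkdocs.nav` import path matches the module shown here but may differ in other releases:

```python
from mkdocs.nav import SiteNavigation

site_nav = SiteNavigation(
    pages_config=[['research/stats.md', 'Stats about Our Collection']],
    use_directory_urls=True,
)
print(site_nav)
# Expected: a single top-level page titled 'Stats about Our Collection'
# Observed: a 'Stats about Our Collection' header containing a child page titled 'Stats'
```

Because a title is supplied but the path contains a separator, `_generate_site_navigation` still fills in `child_title` from the filename, so it builds a `Header` with a nested `Page` rather than a single top-level `Page`.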
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/nav.py`
Content:
```
1 # coding: utf-8
2
3 """
4 Deals with generating the site-wide navigation.
5
6 This consists of building a set of interlinked page and header objects.
7 """
8
9 from mkdocs import utils, exceptions
10 import posixpath
11 import os
12
13
14 def filename_to_title(filename):
15 """
16 Automatically generate a default title, given a filename.
17 """
18 if utils.is_homepage(filename):
19 return 'Home'
20
21 title = os.path.splitext(filename)[0]
22 title = title.replace('-', ' ').replace('_', ' ')
23 # Captialize if the filename was all lowercase, otherwise leave it as-is.
24 if title.lower() == title:
25 title = title.capitalize()
26 return title
27
28
29 class SiteNavigation(object):
30 def __init__(self, pages_config, use_directory_urls=True):
31 self.url_context = URLContext()
32 self.file_context = FileContext()
33 self.nav_items, self.pages = \
34 _generate_site_navigation(pages_config, self.url_context, use_directory_urls)
35 self.homepage = self.pages[0] if self.pages else None
36 self.use_directory_urls = use_directory_urls
37
38 def __str__(self):
39 return ''.join([str(item) for item in self])
40
41 def __iter__(self):
42 return iter(self.nav_items)
43
44 def walk_pages(self):
45 """
46 Returns each page in the site in turn.
47
48 Additionally this sets the active status of the pages and headers,
49 in the site navigation, so that the rendered navbar can correctly
50 highlight the currently active page and/or header item.
51 """
52 page = self.homepage
53 page.set_active()
54 self.url_context.set_current_url(page.abs_url)
55 self.file_context.set_current_path(page.input_path)
56 yield page
57 while page.next_page:
58 page.set_active(False)
59 page = page.next_page
60 page.set_active()
61 self.url_context.set_current_url(page.abs_url)
62 self.file_context.set_current_path(page.input_path)
63 yield page
64 page.set_active(False)
65
66 @property
67 def source_files(self):
68 if not hasattr(self, '_source_files'):
69 self._source_files = set([page.input_path for page in self.pages])
70 return self._source_files
71
72
73 class URLContext(object):
74 """
75 The URLContext is used to ensure that we can generate the appropriate
76 relative URLs to other pages from any given page in the site.
77
78 We use relative URLs so that static sites can be deployed to any location
79 without having to specify what the path component on the host will be
80 if the documentation is not hosted at the root path.
81 """
82
83 def __init__(self):
84 self.base_path = '/'
85
86 def set_current_url(self, current_url):
87 self.base_path = posixpath.dirname(current_url)
88
89 def make_relative(self, url):
90 """
91 Given a URL path return it as a relative URL,
92 given the context of the current page.
93 """
94 suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
95 # Workaround for bug on `posixpath.relpath()` in Python 2.6
96 if self.base_path == '/':
97 if url == '/':
98 # Workaround for static assets
99 return '.'
100 return url.lstrip('/')
101 # Under Python 2.6, relative_path adds an extra '/' at the end.
102 relative_path = posixpath.relpath(url, start=self.base_path).rstrip('/') + suffix
103
104 return relative_path
105
106
107 class FileContext(object):
108 """
109 The FileContext is used to ensure that we can generate the appropriate
110 full path for other pages given their relative path from a particular page.
111
112 This is used when we have relative hyperlinks in the documentation, so that
113 we can ensure that they point to markdown documents that actually exist
114 in the `pages` config.
115 """
116 def __init__(self):
117 self.current_file = None
118 self.base_path = ''
119
120 def set_current_path(self, current_path):
121 self.current_file = current_path
122 self.base_path = os.path.dirname(current_path)
123
124 def make_absolute(self, path):
125 """
126 Given a relative file path return it as a POSIX-style
127 absolute filepath, given the context of the current page.
128 """
129 return posixpath.normpath(posixpath.join(self.base_path, path))
130
131
132 class Page(object):
133 def __init__(self, title, url, path, url_context):
134 self.title = title
135 self.abs_url = url
136 self.active = False
137 self.url_context = url_context
138
139 # Relative paths to the input markdown file and output html file.
140 self.input_path = path
141 self.output_path = utils.get_html_path(path)
142
143 # Links to related pages
144 self.previous_page = None
145 self.next_page = None
146 self.ancestors = []
147
148 @property
149 def url(self):
150 return self.url_context.make_relative(self.abs_url)
151
152 @property
153 def is_homepage(self):
154 return utils.is_homepage(self.input_path)
155
156 def __str__(self):
157 return self._indent_print()
158
159 def _indent_print(self, depth=0):
160 indent = ' ' * depth
161 active_marker = ' [*]' if self.active else ''
162 title = self.title if (self.title is not None) else '[blank]'
163 return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker)
164
165 def set_active(self, active=True):
166 self.active = active
167 for ancestor in self.ancestors:
168 ancestor.active = active
169
170
171 class Header(object):
172 def __init__(self, title, children):
173 self.title, self.children = title, children
174 self.active = False
175
176 def __str__(self):
177 return self._indent_print()
178
179 def _indent_print(self, depth=0):
180 indent = ' ' * depth
181 active_marker = ' [*]' if self.active else ''
182 ret = '%s%s%s\n' % (indent, self.title, active_marker)
183 for item in self.children:
184 ret += item._indent_print(depth + 1)
185 return ret
186
187
188 def _generate_site_navigation(pages_config, url_context, use_directory_urls=True):
189 """
190 Returns a list of Page and Header instances that represent the
191 top level site navigation.
192 """
193 nav_items = []
194 pages = []
195 previous = None
196
197 for config_line in pages_config:
198 if isinstance(config_line, str):
199 path = config_line
200 title, child_title = None, None
201 elif len(config_line) in (1, 2, 3):
202 # Pad any items that don't exist with 'None'
203 padded_config = (list(config_line) + [None, None])[:3]
204 path, title, child_title = padded_config
205 else:
206 msg = (
207 "Line in 'page' config contained %d items. "
208 "Expected 1, 2 or 3 strings." % len(config_line)
209 )
210 raise exceptions.ConfigurationError(msg)
211
212 if title is None:
213 filename = path.split(os.path.sep)[0]
214 title = filename_to_title(filename)
215
216 if child_title is None and os.path.sep in path:
217 filename = path.split(os.path.sep)[-1]
218 child_title = filename_to_title(filename)
219
220 url = utils.get_url_path(path, use_directory_urls)
221
222 if not child_title:
223 # New top level page.
224 page = Page(title=title, url=url, path=path, url_context=url_context)
225 nav_items.append(page)
226 elif not nav_items or (nav_items[-1].title != title):
227 # New second level page.
228 page = Page(title=child_title, url=url, path=path, url_context=url_context)
229 header = Header(title=title, children=[page])
230 nav_items.append(header)
231 page.ancestors = [header]
232 else:
233 # Additional second level page.
234 page = Page(title=child_title, url=url, path=path, url_context=url_context)
235 header = nav_items[-1]
236 header.children.append(page)
237 page.ancestors = [header]
238
239 # Add in previous and next information.
240 if previous:
241 page.previous_page = previous
242 previous.next_page = page
243 previous = page
244
245 pages.append(page)
246
247 return (nav_items, pages)
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/nav.py b/mkdocs/nav.py
--- a/mkdocs/nav.py
+++ b/mkdocs/nav.py
@@ -209,14 +209,17 @@
)
raise exceptions.ConfigurationError(msg)
+ # If both the title and child_title are None, then we
+ # have just been given a path. If that path contains a /
+ # then lets automatically nest it.
+ if title is None and child_title is None and os.path.sep in path:
+ filename = path.split(os.path.sep)[-1]
+ child_title = filename_to_title(filename)
+
if title is None:
filename = path.split(os.path.sep)[0]
title = filename_to_title(filename)
- if child_title is None and os.path.sep in path:
- filename = path.split(os.path.sep)[-1]
- child_title = filename_to_title(filename)
-
url = utils.get_url_path(path, use_directory_urls)
if not child_title:
|
{"golden_diff": "diff --git a/mkdocs/nav.py b/mkdocs/nav.py\n--- a/mkdocs/nav.py\n+++ b/mkdocs/nav.py\n@@ -209,14 +209,17 @@\n )\n raise exceptions.ConfigurationError(msg)\n \n+ # If both the title and child_title are None, then we\n+ # have just been given a path. If that path contains a /\n+ # then lets automatically nest it.\n+ if title is None and child_title is None and os.path.sep in path:\n+ filename = path.split(os.path.sep)[-1]\n+ child_title = filename_to_title(filename)\n+\n if title is None:\n filename = path.split(os.path.sep)[0]\n title = filename_to_title(filename)\n \n- if child_title is None and os.path.sep in path:\n- filename = path.split(os.path.sep)[-1]\n- child_title = filename_to_title(filename)\n-\n url = utils.get_url_path(path, use_directory_urls)\n \n if not child_title:\n", "issue": "Title is used as a section if file is in subdirectory\nAssuming I have a file at `research/stats.md` and a config line:\n\n```\npages:\n- [\"research/stats.md\", \"Stats about Our Collection\"]\n```\n\nI would assume that it would generate a top-level nav item titled \"Stats about Our Collection\".\n\nIn reality, it generates a section **Stats about Our Collection** with a sub-item titled **stats**.\n\nI'm 90% sure this has to do with the logic in [nav.py](https://github.com/mkdocs/mkdocs/blob/master/mkdocs/nav.py#L212-L218) around `child_titles`.\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom mkdocs import utils, exceptions\nimport posixpath\nimport os\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n title = os.path.splitext(filename)[0]\n title = title.replace('-', ' ').replace('_', ' ')\n # Captialize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n return title\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = \\\n _generate_site_navigation(pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages 
from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n \"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = posixpath.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 1) else ''\n # Workaround for bug on `posixpath.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n relative_path = posixpath.relpath(url, start=self.base_path).rstrip('/') + suffix\n\n return relative_path\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n \"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return posixpath.normpath(posixpath.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.active = active\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in self.children:\n ret += item._indent_print(depth + 1)\n return ret\n\n\ndef _generate_site_navigation(pages_config, url_context, use_directory_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = []\n pages = []\n previous = None\n\n for config_line in pages_config:\n if isinstance(config_line, str):\n path = config_line\n 
title, child_title = None, None\n elif len(config_line) in (1, 2, 3):\n # Pad any items that don't exist with 'None'\n padded_config = (list(config_line) + [None, None])[:3]\n path, title, child_title = padded_config\n else:\n msg = (\n \"Line in 'page' config contained %d items. \"\n \"Expected 1, 2 or 3 strings.\" % len(config_line)\n )\n raise exceptions.ConfigurationError(msg)\n\n if title is None:\n filename = path.split(os.path.sep)[0]\n title = filename_to_title(filename)\n\n if child_title is None and os.path.sep in path:\n filename = path.split(os.path.sep)[-1]\n child_title = filename_to_title(filename)\n\n url = utils.get_url_path(path, use_directory_urls)\n\n if not child_title:\n # New top level page.\n page = Page(title=title, url=url, path=path, url_context=url_context)\n nav_items.append(page)\n elif not nav_items or (nav_items[-1].title != title):\n # New second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = Header(title=title, children=[page])\n nav_items.append(header)\n page.ancestors = [header]\n else:\n # Additional second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = nav_items[-1]\n header.children.append(page)\n page.ancestors = [header]\n\n # Add in previous and next information.\n if previous:\n page.previous_page = previous\n previous.next_page = page\n previous = page\n\n pages.append(page)\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}], "after_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom mkdocs import utils, exceptions\nimport posixpath\nimport os\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n title = os.path.splitext(filename)[0]\n title = title.replace('-', ' ').replace('_', ' ')\n # Captialize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n return title\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = \\\n _generate_site_navigation(pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass 
URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n \"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = posixpath.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 1) else ''\n # Workaround for bug on `posixpath.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n relative_path = posixpath.relpath(url, start=self.base_path).rstrip('/') + suffix\n\n return relative_path\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n \"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return posixpath.normpath(posixpath.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.active = active\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in self.children:\n ret += item._indent_print(depth + 1)\n return ret\n\n\ndef _generate_site_navigation(pages_config, url_context, use_directory_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = 
[]\n pages = []\n previous = None\n\n for config_line in pages_config:\n if isinstance(config_line, str):\n path = config_line\n title, child_title = None, None\n elif len(config_line) in (1, 2, 3):\n # Pad any items that don't exist with 'None'\n padded_config = (list(config_line) + [None, None])[:3]\n path, title, child_title = padded_config\n else:\n msg = (\n \"Line in 'page' config contained %d items. \"\n \"Expected 1, 2 or 3 strings.\" % len(config_line)\n )\n raise exceptions.ConfigurationError(msg)\n\n # If both the title and child_title are None, then we\n # have just been given a path. If that path contains a /\n # then lets automatically nest it.\n if title is None and child_title is None and os.path.sep in path:\n filename = path.split(os.path.sep)[-1]\n child_title = filename_to_title(filename)\n\n if title is None:\n filename = path.split(os.path.sep)[0]\n title = filename_to_title(filename)\n\n url = utils.get_url_path(path, use_directory_urls)\n\n if not child_title:\n # New top level page.\n page = Page(title=title, url=url, path=path, url_context=url_context)\n nav_items.append(page)\n elif not nav_items or (nav_items[-1].title != title):\n # New second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = Header(title=title, children=[page])\n nav_items.append(header)\n page.ancestors = [header]\n else:\n # Additional second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = nav_items[-1]\n header.children.append(page)\n page.ancestors = [header]\n\n # Add in previous and next information.\n if previous:\n page.previous_page = previous\n previous.next_page = page\n previous = page\n\n pages.append(page)\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}]}
| 2,838 | 223 |
gh_patches_debug_15892
|
rasdani/github-patches
|
git_diff
|
bentoml__BentoML-4068
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix(scheduling): expose Strategy class
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/bentoml/__init__.py`
Content:
```
1 """
2 BentoML
3 =======
4
5 BentoML is the unified ML Model Serving framework. Data Scientists and ML Engineers use
6 BentoML to:
7
8 * Accelerate and standardize the process of taking ML models to production across teams
9 * Build reliable, scalable, and high performance model serving systems
10 * Provide a flexible MLOps platform that grows with your Data Science needs
11
12 To learn more, visit BentoML documentation at: http://docs.bentoml.com
13 To get involved with the development, find us on GitHub: https://github.com/bentoml
14 And join us in the BentoML slack community: https://l.bentoml.com/join-slack
15 """
16
17 from typing import TYPE_CHECKING
18
19 from ._internal.configuration import BENTOML_VERSION as __version__
20 from ._internal.configuration import load_config
21 from ._internal.configuration import save_config
22 from ._internal.configuration import set_serialization_strategy
23
24 # Inject dependencies and configurations
25 load_config()
26
27 # BentoML built-in types
28 from ._internal.bento import Bento
29 from ._internal.cloud import YataiClient
30 from ._internal.context import ServiceContext as Context
31 from ._internal.models import Model
32 from ._internal.monitoring import monitor
33 from ._internal.runner import Runnable
34 from ._internal.runner import Runner
35 from ._internal.service import Service
36 from ._internal.service.loader import load
37 from ._internal.tag import Tag
38 from ._internal.utils.http import Cookie
39
40 # Bento management APIs
41 from .bentos import delete
42 from .bentos import export_bento
43 from .bentos import get
44 from .bentos import import_bento
45 from .bentos import list # pylint: disable=W0622
46 from .bentos import pull
47 from .bentos import push
48 from .bentos import serve
49
50 # server API
51 from .server import GrpcServer
52 from .server import HTTPServer
53
54 # Framework specific modules, model management and IO APIs are lazily loaded upon import.
55 if TYPE_CHECKING:
56 from . import catboost
57 from . import detectron
58 from . import diffusers
59 from . import easyocr
60 from . import fastai
61 from . import flax
62 from . import gluon
63 from . import h2o
64 from . import keras
65 from . import lightgbm
66 from . import mlflow
67 from . import onnx
68 from . import onnxmlir
69 from . import paddle
70 from . import picklable_model
71 from . import pycaret
72 from . import pytorch
73 from . import pytorch_lightning
74 from . import ray
75 from . import sklearn
76 from . import spacy
77 from . import statsmodels
78 from . import tensorflow
79 from . import tensorflow_v1
80 from . import torchscript
81 from . import transformers
82 from . import triton
83 from . import xgboost
84
85 # isort: off
86 from . import io
87 from . import models
88 from . import metrics # Prometheus metrics client
89 from . import container # Container API
90 from . import client # Client API
91 from . import batch # Batch API
92 from . import exceptions # BentoML exceptions
93 from . import server # Server API
94 from . import monitoring # Monitoring API
95 from . import cloud # Cloud API
96
97 # isort: on
98 else:
99 from ._internal.utils import LazyLoader as _LazyLoader
100
101 # ML Frameworks
102 catboost = _LazyLoader("bentoml.catboost", globals(), "bentoml.catboost")
103 detectron = _LazyLoader("bentoml.detectron", globals(), "bentoml.detectron")
104 diffusers = _LazyLoader("bentoml.diffusers", globals(), "bentoml.diffusers")
105 easyocr = _LazyLoader("bentoml.easyocr", globals(), "bentoml.easyocr")
106 flax = _LazyLoader("bentoml.flax", globals(), "bentoml.flax")
107 fastai = _LazyLoader("bentoml.fastai", globals(), "bentoml.fastai")
108 gluon = _LazyLoader("bentoml.gluon", globals(), "bentoml.gluon")
109 h2o = _LazyLoader("bentoml.h2o", globals(), "bentoml.h2o")
110 lightgbm = _LazyLoader("bentoml.lightgbm", globals(), "bentoml.lightgbm")
111 mlflow = _LazyLoader("bentoml.mlflow", globals(), "bentoml.mlflow")
112 onnx = _LazyLoader("bentoml.onnx", globals(), "bentoml.onnx")
113 onnxmlir = _LazyLoader("bentoml.onnxmlir", globals(), "bentoml.onnxmlir")
114 keras = _LazyLoader("bentoml.keras", globals(), "bentoml.keras")
115 paddle = _LazyLoader("bentoml.paddle", globals(), "bentoml.paddle")
116 pycaret = _LazyLoader("bentoml.pycaret", globals(), "bentoml.pycaret")
117 pytorch = _LazyLoader("bentoml.pytorch", globals(), "bentoml.pytorch")
118 pytorch_lightning = _LazyLoader(
119 "bentoml.pytorch_lightning", globals(), "bentoml.pytorch_lightning"
120 )
121 sklearn = _LazyLoader("bentoml.sklearn", globals(), "bentoml.sklearn")
122 picklable_model = _LazyLoader(
123 "bentoml.picklable_model", globals(), "bentoml.picklable_model"
124 )
125 spacy = _LazyLoader("bentoml.spacy", globals(), "bentoml.spacy")
126 statsmodels = _LazyLoader("bentoml.statsmodels", globals(), "bentoml.statsmodels")
127 tensorflow = _LazyLoader("bentoml.tensorflow", globals(), "bentoml.tensorflow")
128 tensorflow_v1 = _LazyLoader(
129 "bentoml.tensorflow_v1", globals(), "bentoml.tensorflow_v1"
130 )
131 torchscript = _LazyLoader("bentoml.torchscript", globals(), "bentoml.torchscript")
132 transformers = _LazyLoader(
133 "bentoml.transformers", globals(), "bentoml.transformers"
134 )
135 xgboost = _LazyLoader("bentoml.xgboost", globals(), "bentoml.xgboost")
136
137 # Integrations
138 triton = _LazyLoader("bentoml.triton", globals(), "bentoml.triton")
139 ray = _LazyLoader("bentoml.ray", globals(), "bentoml.ray")
140
141 io = _LazyLoader("bentoml.io", globals(), "bentoml.io")
142 batch = _LazyLoader("bentoml.batch", globals(), "bentoml.batch")
143 models = _LazyLoader("bentoml.models", globals(), "bentoml.models")
144 metrics = _LazyLoader("bentoml.metrics", globals(), "bentoml.metrics")
145 container = _LazyLoader("bentoml.container", globals(), "bentoml.container")
146 client = _LazyLoader("bentoml.client", globals(), "bentoml.client")
147 server = _LazyLoader("bentoml.server", globals(), "bentoml.server")
148 exceptions = _LazyLoader("bentoml.exceptions", globals(), "bentoml.exceptions")
149 monitoring = _LazyLoader("bentoml.monitoring", globals(), "bentoml.monitoring")
150 cloud = _LazyLoader("bentoml.cloud", globals(), "bentoml.cloud")
151
152 del _LazyLoader
153
154 __all__ = [
155 "__version__",
156 "Context",
157 "Cookie",
158 "Service",
159 "models",
160 "batch",
161 "metrics",
162 "container",
163 "client",
164 "server",
165 "io",
166 "Tag",
167 "Model",
168 "Runner",
169 "Runnable",
170 "monitoring",
171 "YataiClient", # Yatai REST API Client
172 # bento APIs
173 "list",
174 "get",
175 "delete",
176 "import_bento",
177 "export_bento",
178 "load",
179 "push",
180 "pull",
181 "serve",
182 "Bento",
183 "exceptions",
184 # server APIs
185 "HTTPServer",
186 "GrpcServer",
187 # Framework specific modules
188 "catboost",
189 "detectron",
190 "diffusers",
191 "easyocr",
192 "flax",
193 "fastai",
194 "gluon",
195 "h2o",
196 "lightgbm",
197 "mlflow",
198 "onnx",
199 "onnxmlir",
200 "paddle",
201 "picklable_model",
202 "pycaret",
203 "pytorch",
204 "pytorch_lightning",
205 "keras",
206 "sklearn",
207 "spacy",
208 "statsmodels",
209 "tensorflow",
210 "tensorflow_v1",
211 "torchscript",
212 "transformers",
213 "xgboost",
214 # integrations
215 "ray",
216 "cloud",
217 "triton",
218 "monitor",
219 "load_config",
220 "save_config",
221 "set_serialization_strategy",
222 ]
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/bentoml/__init__.py b/src/bentoml/__init__.py
--- a/src/bentoml/__init__.py
+++ b/src/bentoml/__init__.py
@@ -30,8 +30,10 @@
from ._internal.context import ServiceContext as Context
from ._internal.models import Model
from ._internal.monitoring import monitor
+from ._internal.resource import Resource
from ._internal.runner import Runnable
from ._internal.runner import Runner
+from ._internal.runner.strategy import Strategy
from ._internal.service import Service
from ._internal.service.loader import load
from ._internal.tag import Tag
@@ -219,4 +221,6 @@
"load_config",
"save_config",
"set_serialization_strategy",
+ "Strategy",
+ "Resource",
]
|
{"golden_diff": "diff --git a/src/bentoml/__init__.py b/src/bentoml/__init__.py\n--- a/src/bentoml/__init__.py\n+++ b/src/bentoml/__init__.py\n@@ -30,8 +30,10 @@\n from ._internal.context import ServiceContext as Context\n from ._internal.models import Model\n from ._internal.monitoring import monitor\n+from ._internal.resource import Resource\n from ._internal.runner import Runnable\n from ._internal.runner import Runner\n+from ._internal.runner.strategy import Strategy\n from ._internal.service import Service\n from ._internal.service.loader import load\n from ._internal.tag import Tag\n@@ -219,4 +221,6 @@\n \"load_config\",\n \"save_config\",\n \"set_serialization_strategy\",\n+ \"Strategy\",\n+ \"Resource\",\n ]\n", "issue": "fix(scheduling): expose Strategy class\n\n", "before_files": [{"content": "\"\"\"\nBentoML\n=======\n\nBentoML is the unified ML Model Serving framework. Data Scientists and ML Engineers use\nBentoML to:\n\n* Accelerate and standardize the process of taking ML models to production across teams\n* Build reliable, scalable, and high performance model serving systems\n* Provide a flexible MLOps platform that grows with your Data Science needs\n\nTo learn more, visit BentoML documentation at: http://docs.bentoml.com\nTo get involved with the development, find us on GitHub: https://github.com/bentoml\nAnd join us in the BentoML slack community: https://l.bentoml.com/join-slack\n\"\"\"\n\nfrom typing import TYPE_CHECKING\n\nfrom ._internal.configuration import BENTOML_VERSION as __version__\nfrom ._internal.configuration import load_config\nfrom ._internal.configuration import save_config\nfrom ._internal.configuration import set_serialization_strategy\n\n# Inject dependencies and configurations\nload_config()\n\n# BentoML built-in types\nfrom ._internal.bento import Bento\nfrom ._internal.cloud import YataiClient\nfrom ._internal.context import ServiceContext as Context\nfrom ._internal.models import Model\nfrom ._internal.monitoring import monitor\nfrom ._internal.runner import Runnable\nfrom ._internal.runner import Runner\nfrom ._internal.service import Service\nfrom ._internal.service.loader import load\nfrom ._internal.tag import Tag\nfrom ._internal.utils.http import Cookie\n\n# Bento management APIs\nfrom .bentos import delete\nfrom .bentos import export_bento\nfrom .bentos import get\nfrom .bentos import import_bento\nfrom .bentos import list # pylint: disable=W0622\nfrom .bentos import pull\nfrom .bentos import push\nfrom .bentos import serve\n\n# server API\nfrom .server import GrpcServer\nfrom .server import HTTPServer\n\n# Framework specific modules, model management and IO APIs are lazily loaded upon import.\nif TYPE_CHECKING:\n from . import catboost\n from . import detectron\n from . import diffusers\n from . import easyocr\n from . import fastai\n from . import flax\n from . import gluon\n from . import h2o\n from . import keras\n from . import lightgbm\n from . import mlflow\n from . import onnx\n from . import onnxmlir\n from . import paddle\n from . import picklable_model\n from . import pycaret\n from . import pytorch\n from . import pytorch_lightning\n from . import ray\n from . import sklearn\n from . import spacy\n from . import statsmodels\n from . import tensorflow\n from . import tensorflow_v1\n from . import torchscript\n from . import transformers\n from . import triton\n from . import xgboost\n\n # isort: off\n from . import io\n from . import models\n from . import metrics # Prometheus metrics client\n from . 
import container # Container API\n from . import client # Client API\n from . import batch # Batch API\n from . import exceptions # BentoML exceptions\n from . import server # Server API\n from . import monitoring # Monitoring API\n from . import cloud # Cloud API\n\n # isort: on\nelse:\n from ._internal.utils import LazyLoader as _LazyLoader\n\n # ML Frameworks\n catboost = _LazyLoader(\"bentoml.catboost\", globals(), \"bentoml.catboost\")\n detectron = _LazyLoader(\"bentoml.detectron\", globals(), \"bentoml.detectron\")\n diffusers = _LazyLoader(\"bentoml.diffusers\", globals(), \"bentoml.diffusers\")\n easyocr = _LazyLoader(\"bentoml.easyocr\", globals(), \"bentoml.easyocr\")\n flax = _LazyLoader(\"bentoml.flax\", globals(), \"bentoml.flax\")\n fastai = _LazyLoader(\"bentoml.fastai\", globals(), \"bentoml.fastai\")\n gluon = _LazyLoader(\"bentoml.gluon\", globals(), \"bentoml.gluon\")\n h2o = _LazyLoader(\"bentoml.h2o\", globals(), \"bentoml.h2o\")\n lightgbm = _LazyLoader(\"bentoml.lightgbm\", globals(), \"bentoml.lightgbm\")\n mlflow = _LazyLoader(\"bentoml.mlflow\", globals(), \"bentoml.mlflow\")\n onnx = _LazyLoader(\"bentoml.onnx\", globals(), \"bentoml.onnx\")\n onnxmlir = _LazyLoader(\"bentoml.onnxmlir\", globals(), \"bentoml.onnxmlir\")\n keras = _LazyLoader(\"bentoml.keras\", globals(), \"bentoml.keras\")\n paddle = _LazyLoader(\"bentoml.paddle\", globals(), \"bentoml.paddle\")\n pycaret = _LazyLoader(\"bentoml.pycaret\", globals(), \"bentoml.pycaret\")\n pytorch = _LazyLoader(\"bentoml.pytorch\", globals(), \"bentoml.pytorch\")\n pytorch_lightning = _LazyLoader(\n \"bentoml.pytorch_lightning\", globals(), \"bentoml.pytorch_lightning\"\n )\n sklearn = _LazyLoader(\"bentoml.sklearn\", globals(), \"bentoml.sklearn\")\n picklable_model = _LazyLoader(\n \"bentoml.picklable_model\", globals(), \"bentoml.picklable_model\"\n )\n spacy = _LazyLoader(\"bentoml.spacy\", globals(), \"bentoml.spacy\")\n statsmodels = _LazyLoader(\"bentoml.statsmodels\", globals(), \"bentoml.statsmodels\")\n tensorflow = _LazyLoader(\"bentoml.tensorflow\", globals(), \"bentoml.tensorflow\")\n tensorflow_v1 = _LazyLoader(\n \"bentoml.tensorflow_v1\", globals(), \"bentoml.tensorflow_v1\"\n )\n torchscript = _LazyLoader(\"bentoml.torchscript\", globals(), \"bentoml.torchscript\")\n transformers = _LazyLoader(\n \"bentoml.transformers\", globals(), \"bentoml.transformers\"\n )\n xgboost = _LazyLoader(\"bentoml.xgboost\", globals(), \"bentoml.xgboost\")\n\n # Integrations\n triton = _LazyLoader(\"bentoml.triton\", globals(), \"bentoml.triton\")\n ray = _LazyLoader(\"bentoml.ray\", globals(), \"bentoml.ray\")\n\n io = _LazyLoader(\"bentoml.io\", globals(), \"bentoml.io\")\n batch = _LazyLoader(\"bentoml.batch\", globals(), \"bentoml.batch\")\n models = _LazyLoader(\"bentoml.models\", globals(), \"bentoml.models\")\n metrics = _LazyLoader(\"bentoml.metrics\", globals(), \"bentoml.metrics\")\n container = _LazyLoader(\"bentoml.container\", globals(), \"bentoml.container\")\n client = _LazyLoader(\"bentoml.client\", globals(), \"bentoml.client\")\n server = _LazyLoader(\"bentoml.server\", globals(), \"bentoml.server\")\n exceptions = _LazyLoader(\"bentoml.exceptions\", globals(), \"bentoml.exceptions\")\n monitoring = _LazyLoader(\"bentoml.monitoring\", globals(), \"bentoml.monitoring\")\n cloud = _LazyLoader(\"bentoml.cloud\", globals(), \"bentoml.cloud\")\n\n del _LazyLoader\n\n__all__ = [\n \"__version__\",\n \"Context\",\n \"Cookie\",\n \"Service\",\n \"models\",\n \"batch\",\n \"metrics\",\n \"container\",\n 
\"client\",\n \"server\",\n \"io\",\n \"Tag\",\n \"Model\",\n \"Runner\",\n \"Runnable\",\n \"monitoring\",\n \"YataiClient\", # Yatai REST API Client\n # bento APIs\n \"list\",\n \"get\",\n \"delete\",\n \"import_bento\",\n \"export_bento\",\n \"load\",\n \"push\",\n \"pull\",\n \"serve\",\n \"Bento\",\n \"exceptions\",\n # server APIs\n \"HTTPServer\",\n \"GrpcServer\",\n # Framework specific modules\n \"catboost\",\n \"detectron\",\n \"diffusers\",\n \"easyocr\",\n \"flax\",\n \"fastai\",\n \"gluon\",\n \"h2o\",\n \"lightgbm\",\n \"mlflow\",\n \"onnx\",\n \"onnxmlir\",\n \"paddle\",\n \"picklable_model\",\n \"pycaret\",\n \"pytorch\",\n \"pytorch_lightning\",\n \"keras\",\n \"sklearn\",\n \"spacy\",\n \"statsmodels\",\n \"tensorflow\",\n \"tensorflow_v1\",\n \"torchscript\",\n \"transformers\",\n \"xgboost\",\n # integrations\n \"ray\",\n \"cloud\",\n \"triton\",\n \"monitor\",\n \"load_config\",\n \"save_config\",\n \"set_serialization_strategy\",\n]\n", "path": "src/bentoml/__init__.py"}], "after_files": [{"content": "\"\"\"\nBentoML\n=======\n\nBentoML is the unified ML Model Serving framework. Data Scientists and ML Engineers use\nBentoML to:\n\n* Accelerate and standardize the process of taking ML models to production across teams\n* Build reliable, scalable, and high performance model serving systems\n* Provide a flexible MLOps platform that grows with your Data Science needs\n\nTo learn more, visit BentoML documentation at: http://docs.bentoml.com\nTo get involved with the development, find us on GitHub: https://github.com/bentoml\nAnd join us in the BentoML slack community: https://l.bentoml.com/join-slack\n\"\"\"\n\nfrom typing import TYPE_CHECKING\n\nfrom ._internal.configuration import BENTOML_VERSION as __version__\nfrom ._internal.configuration import load_config\nfrom ._internal.configuration import save_config\nfrom ._internal.configuration import set_serialization_strategy\n\n# Inject dependencies and configurations\nload_config()\n\n# BentoML built-in types\nfrom ._internal.bento import Bento\nfrom ._internal.cloud import YataiClient\nfrom ._internal.context import ServiceContext as Context\nfrom ._internal.models import Model\nfrom ._internal.monitoring import monitor\nfrom ._internal.resource import Resource\nfrom ._internal.runner import Runnable\nfrom ._internal.runner import Runner\nfrom ._internal.runner.strategy import Strategy\nfrom ._internal.service import Service\nfrom ._internal.service.loader import load\nfrom ._internal.tag import Tag\nfrom ._internal.utils.http import Cookie\n\n# Bento management APIs\nfrom .bentos import delete\nfrom .bentos import export_bento\nfrom .bentos import get\nfrom .bentos import import_bento\nfrom .bentos import list # pylint: disable=W0622\nfrom .bentos import pull\nfrom .bentos import push\nfrom .bentos import serve\n\n# server API\nfrom .server import GrpcServer\nfrom .server import HTTPServer\n\n# Framework specific modules, model management and IO APIs are lazily loaded upon import.\nif TYPE_CHECKING:\n from . import catboost\n from . import detectron\n from . import diffusers\n from . import easyocr\n from . import fastai\n from . import flax\n from . import gluon\n from . import h2o\n from . import keras\n from . import lightgbm\n from . import mlflow\n from . import onnx\n from . import onnxmlir\n from . import paddle\n from . import picklable_model\n from . import pycaret\n from . import pytorch\n from . import pytorch_lightning\n from . import ray\n from . import sklearn\n from . import spacy\n from . 
import statsmodels\n from . import tensorflow\n from . import tensorflow_v1\n from . import torchscript\n from . import transformers\n from . import triton\n from . import xgboost\n\n # isort: off\n from . import io\n from . import models\n from . import metrics # Prometheus metrics client\n from . import container # Container API\n from . import client # Client API\n from . import batch # Batch API\n from . import exceptions # BentoML exceptions\n from . import server # Server API\n from . import monitoring # Monitoring API\n from . import cloud # Cloud API\n\n # isort: on\nelse:\n from ._internal.utils import LazyLoader as _LazyLoader\n\n # ML Frameworks\n catboost = _LazyLoader(\"bentoml.catboost\", globals(), \"bentoml.catboost\")\n detectron = _LazyLoader(\"bentoml.detectron\", globals(), \"bentoml.detectron\")\n diffusers = _LazyLoader(\"bentoml.diffusers\", globals(), \"bentoml.diffusers\")\n easyocr = _LazyLoader(\"bentoml.easyocr\", globals(), \"bentoml.easyocr\")\n flax = _LazyLoader(\"bentoml.flax\", globals(), \"bentoml.flax\")\n fastai = _LazyLoader(\"bentoml.fastai\", globals(), \"bentoml.fastai\")\n gluon = _LazyLoader(\"bentoml.gluon\", globals(), \"bentoml.gluon\")\n h2o = _LazyLoader(\"bentoml.h2o\", globals(), \"bentoml.h2o\")\n lightgbm = _LazyLoader(\"bentoml.lightgbm\", globals(), \"bentoml.lightgbm\")\n mlflow = _LazyLoader(\"bentoml.mlflow\", globals(), \"bentoml.mlflow\")\n onnx = _LazyLoader(\"bentoml.onnx\", globals(), \"bentoml.onnx\")\n onnxmlir = _LazyLoader(\"bentoml.onnxmlir\", globals(), \"bentoml.onnxmlir\")\n keras = _LazyLoader(\"bentoml.keras\", globals(), \"bentoml.keras\")\n paddle = _LazyLoader(\"bentoml.paddle\", globals(), \"bentoml.paddle\")\n pycaret = _LazyLoader(\"bentoml.pycaret\", globals(), \"bentoml.pycaret\")\n pytorch = _LazyLoader(\"bentoml.pytorch\", globals(), \"bentoml.pytorch\")\n pytorch_lightning = _LazyLoader(\n \"bentoml.pytorch_lightning\", globals(), \"bentoml.pytorch_lightning\"\n )\n sklearn = _LazyLoader(\"bentoml.sklearn\", globals(), \"bentoml.sklearn\")\n picklable_model = _LazyLoader(\n \"bentoml.picklable_model\", globals(), \"bentoml.picklable_model\"\n )\n spacy = _LazyLoader(\"bentoml.spacy\", globals(), \"bentoml.spacy\")\n statsmodels = _LazyLoader(\"bentoml.statsmodels\", globals(), \"bentoml.statsmodels\")\n tensorflow = _LazyLoader(\"bentoml.tensorflow\", globals(), \"bentoml.tensorflow\")\n tensorflow_v1 = _LazyLoader(\n \"bentoml.tensorflow_v1\", globals(), \"bentoml.tensorflow_v1\"\n )\n torchscript = _LazyLoader(\"bentoml.torchscript\", globals(), \"bentoml.torchscript\")\n transformers = _LazyLoader(\n \"bentoml.transformers\", globals(), \"bentoml.transformers\"\n )\n xgboost = _LazyLoader(\"bentoml.xgboost\", globals(), \"bentoml.xgboost\")\n\n # Integrations\n triton = _LazyLoader(\"bentoml.triton\", globals(), \"bentoml.triton\")\n ray = _LazyLoader(\"bentoml.ray\", globals(), \"bentoml.ray\")\n\n io = _LazyLoader(\"bentoml.io\", globals(), \"bentoml.io\")\n batch = _LazyLoader(\"bentoml.batch\", globals(), \"bentoml.batch\")\n models = _LazyLoader(\"bentoml.models\", globals(), \"bentoml.models\")\n metrics = _LazyLoader(\"bentoml.metrics\", globals(), \"bentoml.metrics\")\n container = _LazyLoader(\"bentoml.container\", globals(), \"bentoml.container\")\n client = _LazyLoader(\"bentoml.client\", globals(), \"bentoml.client\")\n server = _LazyLoader(\"bentoml.server\", globals(), \"bentoml.server\")\n exceptions = _LazyLoader(\"bentoml.exceptions\", globals(), \"bentoml.exceptions\")\n monitoring = 
_LazyLoader(\"bentoml.monitoring\", globals(), \"bentoml.monitoring\")\n cloud = _LazyLoader(\"bentoml.cloud\", globals(), \"bentoml.cloud\")\n\n del _LazyLoader\n\n__all__ = [\n \"__version__\",\n \"Context\",\n \"Cookie\",\n \"Service\",\n \"models\",\n \"batch\",\n \"metrics\",\n \"container\",\n \"client\",\n \"server\",\n \"io\",\n \"Tag\",\n \"Model\",\n \"Runner\",\n \"Runnable\",\n \"monitoring\",\n \"YataiClient\", # Yatai REST API Client\n # bento APIs\n \"list\",\n \"get\",\n \"delete\",\n \"import_bento\",\n \"export_bento\",\n \"load\",\n \"push\",\n \"pull\",\n \"serve\",\n \"Bento\",\n \"exceptions\",\n # server APIs\n \"HTTPServer\",\n \"GrpcServer\",\n # Framework specific modules\n \"catboost\",\n \"detectron\",\n \"diffusers\",\n \"easyocr\",\n \"flax\",\n \"fastai\",\n \"gluon\",\n \"h2o\",\n \"lightgbm\",\n \"mlflow\",\n \"onnx\",\n \"onnxmlir\",\n \"paddle\",\n \"picklable_model\",\n \"pycaret\",\n \"pytorch\",\n \"pytorch_lightning\",\n \"keras\",\n \"sklearn\",\n \"spacy\",\n \"statsmodels\",\n \"tensorflow\",\n \"tensorflow_v1\",\n \"torchscript\",\n \"transformers\",\n \"xgboost\",\n # integrations\n \"ray\",\n \"cloud\",\n \"triton\",\n \"monitor\",\n \"load_config\",\n \"save_config\",\n \"set_serialization_strategy\",\n \"Strategy\",\n \"Resource\",\n]\n", "path": "src/bentoml/__init__.py"}]}
| 2,922 | 181 |
gh_patches_debug_10279
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-8497
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Altair support - Layer charts, `.resolve_scale` don't appear to work
# Summary
Altair appears to work just fine inside streamlit, but I have problems getting layer charts to work. Note - Concat (vert/horizontal) of altair charts works fine, but doing something like
```python
(chart1 + chart2).resolve_scale(y='independent')
```
results in everything going blank, with no chart canvas displayed.
# Steps to reproduce
What are the steps we should take to reproduce the bug:
1. Build 2 Altair charts inside the same Streamlit script, ideally with a shared x-axis like time/date (so it makes sense to create a compound chart)
2. Try to layer them using `(chart1 + chart2).resolve_scale(y='independent')`
## Expected behavior:
The layer chart should display with different y-axes
## Actual behavior:
Nothing displays - the chart fades as if to reload but everything goes blank
## Is this a regression?
This works elsewhere, e.g. in JupyterLab.
# Debug info
- Streamlit version: 0.60.0
- Python version: Python 3.7.4
- Using Conda
- OS version: Mac OS X Catalina
- Browser version: Brave, Version 1.10.97 Chromium: 83.0.4103.116
# Additional information
If needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? Link the original source here!
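For context, a minimal self-contained script that reproduces the pattern described in the summary; the data here is synthetic and only illustrates the layering, it is not taken from the original app:
```python
# Minimal repro sketch: two line charts over a shared x column, layered with
# independent y scales. In the reported environment this rendered blank.
import altair as alt
import pandas as pd
import streamlit as st

df = pd.DataFrame(
    {"x": list(range(10)), "y1": [5 * v for v in range(10)], "y2": [v**2 for v in range(10)]}
)

chart1 = alt.Chart(df).mark_line().encode(x="x", y="y1")
chart2 = alt.Chart(df).mark_line(color="red").encode(x="x", y="y2")

# Concatenation (chart1 & chart2) works; the layered + resolve_scale form is what fails.
st.altair_chart((chart1 + chart2).resolve_scale(y="independent"), use_container_width=True)
```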
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e_playwright/st_altair_chart.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2024)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import altair as alt
16 import numpy as np
17 import pandas as pd
18
19 import streamlit as st
20
21 np.random.seed(0)
22
23 data = np.random.randn(200, 3)
24 df = pd.DataFrame(data, columns=["a", "b", "c"])
25 chart = alt.Chart(df).mark_circle().encode(x="a", y="b", size="c", color="c")
26
27 st.write("Show default vega lite theme:")
28 st.altair_chart(chart, theme=None)
29
30 st.write("Show streamlit theme:")
31 st.altair_chart(chart, theme="streamlit")
32
33 st.write("Overwrite theme config:")
34 chart = (
35 alt.Chart(df, usermeta={"embedOptions": {"theme": None}})
36 .mark_circle()
37 .encode(x="a", y="b", size="c", color="c")
38 )
39 st.altair_chart(chart, theme="streamlit")
40
41 data = pd.DataFrame(
42 {
43 "a": ["A", "B", "C", "D", "E", "F", "G", "H", "I"],
44 "b": [28, 55, 43, 91, 81, 53, 19, 87, 52],
45 }
46 )
47
48 chart = alt.Chart(data).mark_bar().encode(x="a", y="b")
49
50 st.write("Bar chart with overwritten theme props:")
51 st.altair_chart(chart.configure_mark(color="black"), theme="streamlit")
52
53 # mark_arc was added in 4.2, but we have to support altair 4.0-4.1, so we
54 # have to skip this part of the test when testing min versions.
55 major, minor, patch = alt.__version__.split(".")
56 if not (major == "4" and minor < "2"):
57 source = pd.DataFrame(
58 {"category": [1, 2, 3, 4, 5, 6], "value": [4, 6, 10, 3, 7, 8]}
59 )
60
61 chart = (
62 alt.Chart(source)
63 .mark_arc(innerRadius=50)
64 .encode(
65 theta=alt.Theta(field="value", type="quantitative"),
66 color=alt.Color(field="category", type="nominal"),
67 )
68 )
69
70 st.write("Pie Chart with more than 4 Legend items")
71 st.altair_chart(chart, theme="streamlit")
72
73 # taken from vega_datasets barley example
74 barley = alt.UrlData(
75 "https://cdn.jsdelivr.net/npm/[email protected]/data/barley.json"
76 )
77
78 barley_chart = (
79 alt.Chart(barley)
80 .mark_bar()
81 .encode(x="year:O", y="sum(yield):Q", color="year:N", column="site:N")
82 )
83
84 st.write("Grouped Bar Chart with default theme:")
85 st.altair_chart(barley_chart, theme=None)
86
87 st.write("Grouped Bar Chart with streamlit theme:")
88 st.altair_chart(barley_chart, theme="streamlit")
89
90 st.write("Chart with use_container_width used")
91 st.altair_chart(barley_chart, theme=None, use_container_width=True)
92
93 st.write("Layered chart")
94 # Taken from vega_datasets
95 stocks = alt.UrlData(
96 "https://cdn.jsdelivr.net/npm/[email protected]/data/stocks.csv"
97 )
98
99 base = (
100 alt.Chart(stocks)
101 .encode(x="date:T", y="price:Q", color="symbol:N")
102 .transform_filter(alt.datum.symbol == "GOOG")
103 )
104
105 new_base_chart = base.mark_line() + base.mark_point()
106 st.altair_chart(new_base_chart)
107
108 x = np.linspace(10, 100, 10)
109 y1 = 5 * x
110 y2 = 1 / x
111
112 df1 = pd.DataFrame.from_dict({"x": x, "y1": y1, "y2": y2})
113
114 c1 = alt.Chart(df1).mark_line().encode(alt.X("x"), alt.Y("y1"))
115
116 c2 = alt.Chart(df1).mark_line().encode(alt.X("x"), alt.Y("y2"))
117
118 st.altair_chart(c1 & c2, use_container_width=True)
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/e2e_playwright/st_altair_chart.py b/e2e_playwright/st_altair_chart.py
--- a/e2e_playwright/st_altair_chart.py
+++ b/e2e_playwright/st_altair_chart.py
@@ -116,3 +116,20 @@
c2 = alt.Chart(df1).mark_line().encode(alt.X("x"), alt.Y("y2"))
st.altair_chart(c1 & c2, use_container_width=True)
+
+from altair.expr import datum
+
+results = [
+ [2016, 11525, 3],
+ [2017, 11517, 2],
+ [2018, 11521, 2],
+ [2019, 11519, 4],
+]
+
+dataframe = pd.DataFrame(results, columns=["Job Number", "Test Count", "Test Failures"])
+
+base = alt.Chart(dataframe).encode(alt.X("Job Number:O"))
+chart_test_count = base.mark_line().encode(alt.Y("Test Count:N"))
+chart_test_failures = base.mark_line().encode(alt.Y("Test Failures:N"))
+
+st.altair_chart((chart_test_count + chart_test_failures).resolve_scale(y="independent"))
|
{"golden_diff": "diff --git a/e2e_playwright/st_altair_chart.py b/e2e_playwright/st_altair_chart.py\n--- a/e2e_playwright/st_altair_chart.py\n+++ b/e2e_playwright/st_altair_chart.py\n@@ -116,3 +116,20 @@\n c2 = alt.Chart(df1).mark_line().encode(alt.X(\"x\"), alt.Y(\"y2\"))\n \n st.altair_chart(c1 & c2, use_container_width=True)\n+\n+from altair.expr import datum\n+\n+results = [\n+ [2016, 11525, 3],\n+ [2017, 11517, 2],\n+ [2018, 11521, 2],\n+ [2019, 11519, 4],\n+]\n+\n+dataframe = pd.DataFrame(results, columns=[\"Job Number\", \"Test Count\", \"Test Failures\"])\n+\n+base = alt.Chart(dataframe).encode(alt.X(\"Job Number:O\"))\n+chart_test_count = base.mark_line().encode(alt.Y(\"Test Count:N\"))\n+chart_test_failures = base.mark_line().encode(alt.Y(\"Test Failures:N\"))\n+\n+st.altair_chart((chart_test_count + chart_test_failures).resolve_scale(y=\"independent\"))\n", "issue": "Altair support - Layer charts, `.resolve_scale` dont appear to work\n# Summary\r\n\r\nAltair appears to work just fine inside streamlit, but I have problems getting layer charts to work. Note - Concat (vert/horizontal) of altair charts works fine, but doing something like\r\n```\r\n(chart1 + chart2).resolve_scale(y='independent') \r\n```\r\nresults in everything going blank and no chart canvas displayed\r\n\r\n# Steps to reproduce\r\n\r\nWhat are the steps we should take to reproduce the bug:\r\n\r\n1. Build 2 altair charts inside the same streamlit script, ideally with a shared x-axis like time/date (so it makes sense to crate a compound chart)\r\n2. try to layer them using `(chart1 + chart2).resolve_scale(y='independent')`\r\n\r\n## Expected behavior:\r\n\r\nThe layer chart should display with different y-axes\r\n## Actual behavior:\r\n\r\nNothing displays - the chart fades as if to reload but everything goes blank\r\n\r\n## Is this a regression?\r\n\r\nthis works elsewhere, e.g. in jupyterlab\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.60.0\r\n- Python version: Python 3.7.4\r\n- Using Conda\r\n- OS version: Mac OS X Catalina\r\n- Browser version: Brave, Version 1.10.97 Chromium: 83.0.4103.116\r\n\r\n# Additional information\r\n\r\nIf needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? Link the original source here!\r\n\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022-2024)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport altair as alt\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\n\nnp.random.seed(0)\n\ndata = np.random.randn(200, 3)\ndf = pd.DataFrame(data, columns=[\"a\", \"b\", \"c\"])\nchart = alt.Chart(df).mark_circle().encode(x=\"a\", y=\"b\", size=\"c\", color=\"c\")\n\nst.write(\"Show default vega lite theme:\")\nst.altair_chart(chart, theme=None)\n\nst.write(\"Show streamlit theme:\")\nst.altair_chart(chart, theme=\"streamlit\")\n\nst.write(\"Overwrite theme config:\")\nchart = (\n alt.Chart(df, usermeta={\"embedOptions\": {\"theme\": None}})\n .mark_circle()\n .encode(x=\"a\", y=\"b\", size=\"c\", color=\"c\")\n)\nst.altair_chart(chart, theme=\"streamlit\")\n\ndata = pd.DataFrame(\n {\n \"a\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n \"b\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n }\n)\n\nchart = alt.Chart(data).mark_bar().encode(x=\"a\", y=\"b\")\n\nst.write(\"Bar chart with overwritten theme props:\")\nst.altair_chart(chart.configure_mark(color=\"black\"), theme=\"streamlit\")\n\n# mark_arc was added in 4.2, but we have to support altair 4.0-4.1, so we\n# have to skip this part of the test when testing min versions.\nmajor, minor, patch = alt.__version__.split(\".\")\nif not (major == \"4\" and minor < \"2\"):\n source = pd.DataFrame(\n {\"category\": [1, 2, 3, 4, 5, 6], \"value\": [4, 6, 10, 3, 7, 8]}\n )\n\n chart = (\n alt.Chart(source)\n .mark_arc(innerRadius=50)\n .encode(\n theta=alt.Theta(field=\"value\", type=\"quantitative\"),\n color=alt.Color(field=\"category\", type=\"nominal\"),\n )\n )\n\n st.write(\"Pie Chart with more than 4 Legend items\")\n st.altair_chart(chart, theme=\"streamlit\")\n\n# taken from vega_datasets barley example\nbarley = alt.UrlData(\n \"https://cdn.jsdelivr.net/npm/[email protected]/data/barley.json\"\n)\n\nbarley_chart = (\n alt.Chart(barley)\n .mark_bar()\n .encode(x=\"year:O\", y=\"sum(yield):Q\", color=\"year:N\", column=\"site:N\")\n)\n\nst.write(\"Grouped Bar Chart with default theme:\")\nst.altair_chart(barley_chart, theme=None)\n\nst.write(\"Grouped Bar Chart with streamlit theme:\")\nst.altair_chart(barley_chart, theme=\"streamlit\")\n\nst.write(\"Chart with use_container_width used\")\nst.altair_chart(barley_chart, theme=None, use_container_width=True)\n\nst.write(\"Layered chart\")\n# Taken from vega_datasets\nstocks = alt.UrlData(\n \"https://cdn.jsdelivr.net/npm/[email protected]/data/stocks.csv\"\n)\n\nbase = (\n alt.Chart(stocks)\n .encode(x=\"date:T\", y=\"price:Q\", color=\"symbol:N\")\n .transform_filter(alt.datum.symbol == \"GOOG\")\n)\n\nnew_base_chart = base.mark_line() + base.mark_point()\nst.altair_chart(new_base_chart)\n\nx = np.linspace(10, 100, 10)\ny1 = 5 * x\ny2 = 1 / x\n\ndf1 = pd.DataFrame.from_dict({\"x\": x, \"y1\": y1, \"y2\": y2})\n\nc1 = alt.Chart(df1).mark_line().encode(alt.X(\"x\"), alt.Y(\"y1\"))\n\nc2 = alt.Chart(df1).mark_line().encode(alt.X(\"x\"), alt.Y(\"y2\"))\n\nst.altair_chart(c1 & c2, 
use_container_width=True)\n", "path": "e2e_playwright/st_altair_chart.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2024)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport altair as alt\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\n\nnp.random.seed(0)\n\ndata = np.random.randn(200, 3)\ndf = pd.DataFrame(data, columns=[\"a\", \"b\", \"c\"])\nchart = alt.Chart(df).mark_circle().encode(x=\"a\", y=\"b\", size=\"c\", color=\"c\")\n\nst.write(\"Show default vega lite theme:\")\nst.altair_chart(chart, theme=None)\n\nst.write(\"Show streamlit theme:\")\nst.altair_chart(chart, theme=\"streamlit\")\n\nst.write(\"Overwrite theme config:\")\nchart = (\n alt.Chart(df, usermeta={\"embedOptions\": {\"theme\": None}})\n .mark_circle()\n .encode(x=\"a\", y=\"b\", size=\"c\", color=\"c\")\n)\nst.altair_chart(chart, theme=\"streamlit\")\n\ndata = pd.DataFrame(\n {\n \"a\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n \"b\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n }\n)\n\nchart = alt.Chart(data).mark_bar().encode(x=\"a\", y=\"b\")\n\nst.write(\"Bar chart with overwritten theme props:\")\nst.altair_chart(chart.configure_mark(color=\"black\"), theme=\"streamlit\")\n\n# mark_arc was added in 4.2, but we have to support altair 4.0-4.1, so we\n# have to skip this part of the test when testing min versions.\nmajor, minor, patch = alt.__version__.split(\".\")\nif not (major == \"4\" and minor < \"2\"):\n source = pd.DataFrame(\n {\"category\": [1, 2, 3, 4, 5, 6], \"value\": [4, 6, 10, 3, 7, 8]}\n )\n\n chart = (\n alt.Chart(source)\n .mark_arc(innerRadius=50)\n .encode(\n theta=alt.Theta(field=\"value\", type=\"quantitative\"),\n color=alt.Color(field=\"category\", type=\"nominal\"),\n )\n )\n\n st.write(\"Pie Chart with more than 4 Legend items\")\n st.altair_chart(chart, theme=\"streamlit\")\n\n# taken from vega_datasets barley example\nbarley = alt.UrlData(\n \"https://cdn.jsdelivr.net/npm/[email protected]/data/barley.json\"\n)\n\nbarley_chart = (\n alt.Chart(barley)\n .mark_bar()\n .encode(x=\"year:O\", y=\"sum(yield):Q\", color=\"year:N\", column=\"site:N\")\n)\n\nst.write(\"Grouped Bar Chart with default theme:\")\nst.altair_chart(barley_chart, theme=None)\n\nst.write(\"Grouped Bar Chart with streamlit theme:\")\nst.altair_chart(barley_chart, theme=\"streamlit\")\n\nst.write(\"Chart with use_container_width used\")\nst.altair_chart(barley_chart, theme=None, use_container_width=True)\n\nst.write(\"Layered chart\")\n# Taken from vega_datasets\nstocks = alt.UrlData(\n \"https://cdn.jsdelivr.net/npm/[email protected]/data/stocks.csv\"\n)\n\nbase = (\n alt.Chart(stocks)\n .encode(x=\"date:T\", y=\"price:Q\", color=\"symbol:N\")\n .transform_filter(alt.datum.symbol == \"GOOG\")\n)\n\nnew_base_chart = base.mark_line() + base.mark_point()\nst.altair_chart(new_base_chart)\n\nx = np.linspace(10, 100, 10)\ny1 = 5 * x\ny2 = 1 / x\n\ndf1 = pd.DataFrame.from_dict({\"x\": x, \"y1\": y1, \"y2\": y2})\n\nc1 = 
alt.Chart(df1).mark_line().encode(alt.X(\"x\"), alt.Y(\"y1\"))\n\nc2 = alt.Chart(df1).mark_line().encode(alt.X(\"x\"), alt.Y(\"y2\"))\n\nst.altair_chart(c1 & c2, use_container_width=True)\n\nfrom altair.expr import datum\n\nresults = [\n [2016, 11525, 3],\n [2017, 11517, 2],\n [2018, 11521, 2],\n [2019, 11519, 4],\n]\n\ndataframe = pd.DataFrame(results, columns=[\"Job Number\", \"Test Count\", \"Test Failures\"])\n\nbase = alt.Chart(dataframe).encode(alt.X(\"Job Number:O\"))\nchart_test_count = base.mark_line().encode(alt.Y(\"Test Count:N\"))\nchart_test_failures = base.mark_line().encode(alt.Y(\"Test Failures:N\"))\n\nst.altair_chart((chart_test_count + chart_test_failures).resolve_scale(y=\"independent\"))\n", "path": "e2e_playwright/st_altair_chart.py"}]}
| 1,934 | 298 |
gh_patches_debug_32010
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-2125
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve analytics for auth claims flow
With the move to integer claims in #2046, IdG will return error codes in place of True (`0`) or False (`1`) when an error occurs in processing the user's eligibility. Any integer that is greater than or equal to `10` is considered an error code.
We want to improve our analytics around the auth flow, in particular to capture any errors that occur as described by IdG error codes.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [x] The [`FinishedSignInEvent`](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/analytics.py#L32) is updated to allow an optional `error_code` event property.
- [x] The `error_code` property is the integer value of the claim error code (originally transmitted as a string)
## Additional context
In #2046, we will handle the regular True (`0`) / False (`1`) cases.
This issue has been scoped back to exclude lookup of the error message associated with a given code. This needs further discussion and design with CDT.
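As a rough illustration of the second acceptance criterion: the claim is transmitted as a string, and only values at or above `10` would be surfaced as an `error_code` event property. The helper name below is an assumption for illustration, not the project's final API:
```python
from typing import Optional

# Sketch: claims arrive as strings (e.g. "0", "1", "10"); by the convention
# above, any integer value >= 10 is an IdG error code that should be attached
# to the finished-sign-in analytics event as `error_code`.
def claim_error_code(raw_claim: Optional[str]) -> Optional[int]:
    if raw_claim is None:
        return None
    value = int(raw_claim)
    return value if value >= 10 else None
```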
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/oauth/analytics.py`
Content:
```
1 """
2 The oauth application: analytics implementation.
3 """
4
5 from benefits.core import analytics as core, session
6
7
8 class OAuthEvent(core.Event):
9 """Base OAuth analytics event."""
10
11 def __init__(self, request, event_type):
12 super().__init__(request, event_type)
13 verifier = session.verifier(request)
14 if verifier and verifier.uses_auth_verification:
15 self.update_event_properties(auth_provider=verifier.auth_provider.client_name)
16
17
18 class StartedSignInEvent(OAuthEvent):
19 """Analytics event representing the beginning of the OAuth sign in flow."""
20
21 def __init__(self, request):
22 super().__init__(request, "started sign in")
23
24
25 class CanceledSignInEvent(OAuthEvent):
26 """Analytics event representing the canceling of application sign in."""
27
28 def __init__(self, request):
29 super().__init__(request, "canceled sign in")
30
31
32 class FinishedSignInEvent(OAuthEvent):
33 """Analytics event representing the end of the OAuth sign in flow."""
34
35 def __init__(self, request):
36 super().__init__(request, "finished sign in")
37
38
39 class StartedSignOutEvent(OAuthEvent):
40 """Analytics event representing the beginning of application sign out."""
41
42 def __init__(self, request):
43 super().__init__(request, "started sign out")
44
45
46 class FinishedSignOutEvent(OAuthEvent):
47 """Analytics event representing the end of application sign out."""
48
49 def __init__(self, request):
50 super().__init__(request, "finished sign out")
51 self.update_event_properties(origin=session.origin(request))
52
53
54 def started_sign_in(request):
55 """Send the "started sign in" analytics event."""
56 core.send_event(StartedSignInEvent(request))
57
58
59 def canceled_sign_in(request):
60 """Send the "canceled sign in" analytics event."""
61 core.send_event(CanceledSignInEvent(request))
62
63
64 def finished_sign_in(request):
65 """Send the "finished sign in" analytics event."""
66 core.send_event(FinishedSignInEvent(request))
67
68
69 def started_sign_out(request):
70 """Send the "started signed out" analytics event."""
71 core.send_event(StartedSignOutEvent(request))
72
73
74 def finished_sign_out(request):
75 """Send the "finished sign out" analytics event."""
76 core.send_event(FinishedSignOutEvent(request))
77
```
Path: `benefits/oauth/views.py`
Content:
```
1 import logging
2
3 from django.shortcuts import redirect
4 from django.urls import reverse
5 from django.utils.decorators import decorator_from_middleware
6
7 from benefits.core import session
8 from . import analytics, redirects
9 from .client import oauth
10 from .middleware import VerifierUsesAuthVerificationSessionRequired
11
12
13 logger = logging.getLogger(__name__)
14
15
16 ROUTE_AUTH = "oauth:authorize"
17 ROUTE_START = "eligibility:start"
18 ROUTE_CONFIRM = "eligibility:confirm"
19 ROUTE_UNVERIFIED = "eligibility:unverified"
20 ROUTE_POST_LOGOUT = "oauth:post_logout"
21
22
23 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)
24 def login(request):
25 """View implementing OIDC authorize_redirect."""
26 verifier = session.verifier(request)
27 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
28
29 if not oauth_client:
30 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
31
32 route = reverse(ROUTE_AUTH)
33 redirect_uri = redirects.generate_redirect_uri(request, route)
34
35 logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}")
36
37 analytics.started_sign_in(request)
38
39 return oauth_client.authorize_redirect(request, redirect_uri)
40
41
42 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)
43 def authorize(request):
44 """View implementing OIDC token authorization."""
45 verifier = session.verifier(request)
46 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
47
48 if not oauth_client:
49 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
50
51 logger.debug("Attempting to authorize OAuth access token")
52 token = oauth_client.authorize_access_token(request)
53
54 if token is None:
55 logger.warning("Could not authorize OAuth access token")
56 return redirect(ROUTE_START)
57
58 logger.debug("OAuth access token authorized")
59
60 # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.
61 id_token = token["id_token"]
62
63 # We store the returned claim in case it can be used later in eligibility verification.
64 verifier_claim = verifier.auth_provider.claim
65 stored_claim = None
66
67 if verifier_claim:
68 userinfo = token.get("userinfo")
69
70 if userinfo:
71 claim_value = userinfo.get(verifier_claim)
72 # the claim comes back in userinfo like { "claim": "1" | "0" }
73 claim_value = int(claim_value) if claim_value else None
74 if claim_value is None:
75 logger.warning(f"userinfo did not contain: {verifier_claim}")
76 elif claim_value == 1:
77 # if userinfo contains our claim and the flag is 1 (true), store the *claim*
78 stored_claim = verifier_claim
79
80 session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
81
82 analytics.finished_sign_in(request)
83
84 return redirect(ROUTE_CONFIRM)
85
86
87 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)
88 def cancel(request):
89 """View implementing cancellation of OIDC authorization."""
90
91 analytics.canceled_sign_in(request)
92
93 return redirect(ROUTE_UNVERIFIED)
94
95
96 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)
97 def logout(request):
98 """View implementing OIDC and application sign out."""
99 verifier = session.verifier(request)
100 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
101
102 if not oauth_client:
103 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
104
105 analytics.started_sign_out(request)
106
107 # overwrite the oauth session token, the user is signed out of the app
108 token = session.oauth_token(request)
109 session.logout(request)
110
111 route = reverse(ROUTE_POST_LOGOUT)
112 redirect_uri = redirects.generate_redirect_uri(request, route)
113
114 logger.debug(f"OAuth end_session_endpoint with redirect_uri: {redirect_uri}")
115
116 # send the user through the end_session_endpoint, redirecting back to
117 # the post_logout route
118 return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)
119
120
121 @decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)
122 def post_logout(request):
123 """View routes the user to their origin after sign out."""
124
125 analytics.finished_sign_out(request)
126
127 origin = session.origin(request)
128 return redirect(origin)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/benefits/oauth/analytics.py b/benefits/oauth/analytics.py
--- a/benefits/oauth/analytics.py
+++ b/benefits/oauth/analytics.py
@@ -32,8 +32,10 @@
class FinishedSignInEvent(OAuthEvent):
"""Analytics event representing the end of the OAuth sign in flow."""
- def __init__(self, request):
+ def __init__(self, request, error=None):
super().__init__(request, "finished sign in")
+ if error is not None:
+ self.update_event_properties(error_code=error)
class StartedSignOutEvent(OAuthEvent):
@@ -61,9 +63,9 @@
core.send_event(CanceledSignInEvent(request))
-def finished_sign_in(request):
+def finished_sign_in(request, error=None):
"""Send the "finished sign in" analytics event."""
- core.send_event(FinishedSignInEvent(request))
+ core.send_event(FinishedSignInEvent(request, error))
def started_sign_out(request):
diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py
--- a/benefits/oauth/views.py
+++ b/benefits/oauth/views.py
@@ -64,6 +64,8 @@
verifier_claim = verifier.auth_provider.claim
stored_claim = None
+ error_claim = None
+
if verifier_claim:
userinfo = token.get("userinfo")
@@ -76,10 +78,11 @@
elif claim_value == 1:
# if userinfo contains our claim and the flag is 1 (true), store the *claim*
stored_claim = verifier_claim
+ elif claim_value >= 10:
+ error_claim = claim_value
session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
-
- analytics.finished_sign_in(request)
+ analytics.finished_sign_in(request, error=error_claim)
return redirect(ROUTE_CONFIRM)
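The patch above threads an IdG error code (any claim value of 10 or more) from the claim check in `authorize` into the "finished sign in" analytics event. A standalone sketch of just that branching logic, handy for sanity-checking the thresholds (the helper name `classify_claim` is an assumption for illustration, not code from the repository):

```python
# Hypothetical helper mirroring the patched branch in authorize():
# 0/1 are boolean eligibility flags, values >= 10 are IdG error codes.
def classify_claim(claim_value, verifier_claim="eligibility"):
    stored_claim = None
    error_claim = None
    if claim_value is None:
        pass  # claim missing from userinfo; authorize() logs a warning here
    elif claim_value == 1:
        stored_claim = verifier_claim
    elif claim_value >= 10:
        error_claim = claim_value
    return stored_claim, error_claim


assert classify_claim(1) == ("eligibility", None)  # verified, no error
assert classify_claim(0) == (None, None)           # not verified, no error
assert classify_claim(10) == (None, 10)            # error code forwarded to analytics
assert classify_claim(None) == (None, None)        # claim absent
```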
verification_info:
{"golden_diff": "diff --git a/benefits/oauth/analytics.py b/benefits/oauth/analytics.py\n--- a/benefits/oauth/analytics.py\n+++ b/benefits/oauth/analytics.py\n@@ -32,8 +32,10 @@\n class FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n \n- def __init__(self, request):\n+ def __init__(self, request, error=None):\n super().__init__(request, \"finished sign in\")\n+ if error is not None:\n+ self.update_event_properties(error_code=error)\n \n \n class StartedSignOutEvent(OAuthEvent):\n@@ -61,9 +63,9 @@\n core.send_event(CanceledSignInEvent(request))\n \n \n-def finished_sign_in(request):\n+def finished_sign_in(request, error=None):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n- core.send_event(FinishedSignInEvent(request))\n+ core.send_event(FinishedSignInEvent(request, error))\n \n \n def started_sign_out(request):\ndiff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -64,6 +64,8 @@\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n \n+ error_claim = None\n+\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n \n@@ -76,10 +78,11 @@\n elif claim_value == 1:\n # if userinfo contains our claim and the flag is 1 (true), store the *claim*\n stored_claim = verifier_claim\n+ elif claim_value >= 10:\n+ error_claim = claim_value\n \n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n-\n- analytics.finished_sign_in(request)\n+ analytics.finished_sign_in(request, error=error_claim)\n \n return redirect(ROUTE_CONFIRM)\n", "issue": "Improve analytics for auth claims flow\nWith the move to integer claims in #2046, IdG will return error codes in place of True (`0`) or False (`1`) when an error occurs in processing the user's eligibility. Any integer that is greater than or equal to `10` is considered an error code.\n\nWe want to improve our analytics around the auth flow, in particular to capture any errors that occur as described by IdG error codes.\n\n## Acceptance Criteria\n\n<!-- Remember to consider edge cases -->\n\n- [x] The [`FinishedSignInEvent`](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/analytics.py#L32) is updated to allow an optional `error_code` event property.\n- [x] The `error_code` property is the integer value of the claim error code (originally transmitted as a string)\n\n## Additional context\n\nIn #2046, we will handle the regular True (`0`) / False (`1`) cases.\n\nThis issue has been scoped back to exclude lookup of the error message associated with a given code. 
This needs further discussion and design with CDT.\n", "before_files": [{"content": "\"\"\"\nThe oauth application: analytics implementation.\n\"\"\"\n\nfrom benefits.core import analytics as core, session\n\n\nclass OAuthEvent(core.Event):\n \"\"\"Base OAuth analytics event.\"\"\"\n\n def __init__(self, request, event_type):\n super().__init__(request, event_type)\n verifier = session.verifier(request)\n if verifier and verifier.uses_auth_verification:\n self.update_event_properties(auth_provider=verifier.auth_provider.client_name)\n\n\nclass StartedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign in\")\n\n\nclass CanceledSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the canceling of application sign in.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"canceled sign in\")\n\n\nclass FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign in\")\n\n\nclass StartedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign out\")\n\n\nclass FinishedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign out\")\n self.update_event_properties(origin=session.origin(request))\n\n\ndef started_sign_in(request):\n \"\"\"Send the \"started sign in\" analytics event.\"\"\"\n core.send_event(StartedSignInEvent(request))\n\n\ndef canceled_sign_in(request):\n \"\"\"Send the \"canceled sign in\" analytics event.\"\"\"\n core.send_event(CanceledSignInEvent(request))\n\n\ndef finished_sign_in(request):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request))\n\n\ndef started_sign_out(request):\n \"\"\"Send the \"started signed out\" analytics event.\"\"\"\n core.send_event(StartedSignOutEvent(request))\n\n\ndef finished_sign_out(request):\n \"\"\"Send the \"finished sign out\" analytics event.\"\"\"\n core.send_event(FinishedSignOutEvent(request))\n", "path": "benefits/oauth/analytics.py"}, {"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom . 
import analytics, redirects\nfrom .client import oauth\nfrom .middleware import VerifierUsesAuthVerificationSessionRequired\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n\n if userinfo:\n claim_value = userinfo.get(verifier_claim)\n # the claim comes back in userinfo like { \"claim\": \"1\" | \"0\" }\n claim_value = int(claim_value) if claim_value else None\n if claim_value is None:\n logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n elif claim_value == 1:\n # if userinfo contains our claim and the flag is 1 (true), store the *claim*\n stored_claim = verifier_claim\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = 
redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}], "after_files": [{"content": "\"\"\"\nThe oauth application: analytics implementation.\n\"\"\"\n\nfrom benefits.core import analytics as core, session\n\n\nclass OAuthEvent(core.Event):\n \"\"\"Base OAuth analytics event.\"\"\"\n\n def __init__(self, request, event_type):\n super().__init__(request, event_type)\n verifier = session.verifier(request)\n if verifier and verifier.uses_auth_verification:\n self.update_event_properties(auth_provider=verifier.auth_provider.client_name)\n\n\nclass StartedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign in\")\n\n\nclass CanceledSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the canceling of application sign in.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"canceled sign in\")\n\n\nclass FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request, error=None):\n super().__init__(request, \"finished sign in\")\n if error is not None:\n self.update_event_properties(error_code=error)\n\n\nclass StartedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign out\")\n\n\nclass FinishedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign out\")\n self.update_event_properties(origin=session.origin(request))\n\n\ndef started_sign_in(request):\n \"\"\"Send the \"started sign in\" analytics event.\"\"\"\n core.send_event(StartedSignInEvent(request))\n\n\ndef canceled_sign_in(request):\n \"\"\"Send the \"canceled sign in\" analytics event.\"\"\"\n core.send_event(CanceledSignInEvent(request))\n\n\ndef finished_sign_in(request, error=None):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request, error))\n\n\ndef started_sign_out(request):\n \"\"\"Send the \"started signed out\" analytics event.\"\"\"\n core.send_event(StartedSignOutEvent(request))\n\n\ndef finished_sign_out(request):\n \"\"\"Send the \"finished sign out\" analytics event.\"\"\"\n core.send_event(FinishedSignOutEvent(request))\n", "path": "benefits/oauth/analytics.py"}, {"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom . 
import analytics, redirects\nfrom .client import oauth\nfrom .middleware import VerifierUsesAuthVerificationSessionRequired\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n error_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n\n if userinfo:\n claim_value = userinfo.get(verifier_claim)\n # the claim comes back in userinfo like { \"claim\": \"1\" | \"0\" }\n claim_value = int(claim_value) if claim_value else None\n if claim_value is None:\n logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n elif claim_value == 1:\n # if userinfo contains our claim and the flag is 1 (true), store the *claim*\n stored_claim = verifier_claim\n elif claim_value >= 10:\n error_claim = claim_value\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n analytics.finished_sign_in(request, error=error_claim)\n\n return redirect(ROUTE_CONFIRM)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n 
session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\n@decorator_from_middleware(VerifierUsesAuthVerificationSessionRequired)\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}]}
num_tokens: 2,342 | num_tokens_diff: 422
problem_id: gh_patches_debug_53850 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pyca__cryptography-5648
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Testing with tar bundle complains about unknown pytest markers
The source bundle ``cryptography-3.3.1.tar.gz`` does not include tox.ini. The ini file is also used to define pytest markers and pytest configuration:
```
[pytest]
addopts = -r s
markers =
requires_backend_interface: this test requires a specific backend interface
skip_fips: this test is not executed in FIPS mode
supported: parametrized test requiring only_if and skip_message
wycheproof_tests: this test runs a wycheproof fixture
```
pytest complains about unknown markers. Downstream tests with the source bundle flood the log file with warnings like:
```
tests/test_fernet.py:43
/builddir/build/BUILD/cryptography-3.3.1/tests/test_fernet.py:43: PytestUnknownMarkWarning: Unknown pytest.mark.requires_backend_interface - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/stable/mark.html
@pytest.mark.requires_backend_interface(interface=CipherBackend)
```
Suggested solutions:
* ship ``tox.ini`` in source distribution
* Move marker definitions to ``pytest.ini`` and ship that file
* Move marker definitions to ``pyproject.toml``
* Define markers on conftest.py
```
def pytest_configure(config):
config.addinivalue_line("markers", "requires_backend_interface: this test requires a specific backend interface")
```
--- END ISSUE ---
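Of the suggested solutions, the conftest.py route needs no packaging changes at all; a minimal sketch registering all four markers quoted above (illustrative only, not necessarily the fix the project shipped):

```python
# conftest.py (sketch): register the markers from the quoted [pytest] section
# so pytest stops emitting PytestUnknownMarkWarning when tox.ini is absent.
def pytest_configure(config):
    markers = [
        "requires_backend_interface: this test requires a specific backend interface",
        "skip_fips: this test is not executed in FIPS mode",
        "supported: parametrized test requiring only_if and skip_message",
        "wycheproof_tests: this test runs a wycheproof fixture",
    ]
    for marker in markers:
        config.addinivalue_line("markers", marker)
```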
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 import os
8 import sys
9
10 from setuptools import find_packages, setup
11
12
13 base_dir = os.path.dirname(__file__)
14 src_dir = os.path.join(base_dir, "src")
15
16 # When executing the setup.py, we need to be able to import ourselves, this
17 # means that we need to add the src/ directory to the sys.path.
18 sys.path.insert(0, src_dir)
19
20 about = {}
21 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
22 exec(f.read(), about)
23
24
25 # `setup_requirements` must be kept in sync with `pyproject.toml`
26 setup_requirements = ["cffi>=1.12"]
27
28 with open(os.path.join(base_dir, "README.rst")) as f:
29 long_description = f.read()
30
31
32 try:
33 setup(
34 name=about["__title__"],
35 version=about["__version__"],
36 description=about["__summary__"],
37 long_description=long_description,
38 long_description_content_type="text/x-rst",
39 license=about["__license__"],
40 url=about["__uri__"],
41 author=about["__author__"],
42 author_email=about["__email__"],
43 classifiers=[
44 "Development Status :: 5 - Production/Stable",
45 "Intended Audience :: Developers",
46 "License :: OSI Approved :: Apache Software License",
47 "License :: OSI Approved :: BSD License",
48 "Natural Language :: English",
49 "Operating System :: MacOS :: MacOS X",
50 "Operating System :: POSIX",
51 "Operating System :: POSIX :: BSD",
52 "Operating System :: POSIX :: Linux",
53 "Operating System :: Microsoft :: Windows",
54 "Programming Language :: Python",
55 "Programming Language :: Python :: 3",
56 "Programming Language :: Python :: 3 :: Only",
57 "Programming Language :: Python :: 3.6",
58 "Programming Language :: Python :: 3.7",
59 "Programming Language :: Python :: 3.8",
60 "Programming Language :: Python :: 3.9",
61 "Programming Language :: Python :: Implementation :: CPython",
62 "Programming Language :: Python :: Implementation :: PyPy",
63 "Topic :: Security :: Cryptography",
64 ],
65 package_dir={"": "src"},
66 packages=find_packages(
67 where="src", exclude=["_cffi_src", "_cffi_src.*"]
68 ),
69 include_package_data=True,
70 python_requires=">=3.6",
71 install_requires=setup_requirements,
72 setup_requires=setup_requirements,
73 extras_require={
74 "test": [
75 "pytest>=4.4.0",
76 "pytest-cov",
77 "pytest-subtests",
78 "pytest-xdist",
79 "pretend",
80 "iso8601",
81 "pytz",
82 "hypothesis>=1.11.4,!=3.79.2",
83 ],
84 "docs": [
85 "sphinx >= 1.6.5,!=1.8.0,!=3.1.0,!=3.1.1",
86 "sphinx_rtd_theme",
87 ],
88 "docstest": [
89 "doc8",
90 "pyenchant >= 1.6.11",
91 "twine >= 1.12.0",
92 "sphinxcontrib-spelling >= 4.0.1",
93 ],
94 "pep8test": [
95 "black",
96 "flake8",
97 "flake8-import-order",
98 "pep8-naming",
99 ],
100 # This extra is for OpenSSH private keys that use bcrypt KDF
101 # Versions: v3.1.3 - ignore_few_rounds, v3.1.5 - abi3
102 "ssh": ["bcrypt >= 3.1.5"],
103 },
104 # for cffi
105 zip_safe=False,
106 ext_package="cryptography.hazmat.bindings",
107 cffi_modules=[
108 "src/_cffi_src/build_openssl.py:ffi",
109 "src/_cffi_src/build_padding.py:ffi",
110 ],
111 )
112 except: # noqa: E722
113 # Note: This is a bare exception that re-raises so that we don't interfere
114 # with anything the installation machinery might want to do. Because we
115 # print this for any exception this msg can appear (e.g. in verbose logs)
116 # even if there's no failure. For example, SetupRequirementsError is raised
117 # during PEP517 building and prints this text. setuptools raises SystemExit
118 # when compilation fails right now, but it's possible this isn't stable
119 # or a public API commitment so we'll remain ultra conservative.
120 print(
121 """
122 =============================DEBUG ASSISTANCE=============================
123 If you are seeing a compilation error please try the following steps to
124 successfully install cryptography:
125 1) Upgrade to the latest pip and try again. This will fix errors for most
126 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
127 2) Read https://cryptography.io/en/latest/installation.html for specific
128 instructions for your platform.
129 3) Check our frequently asked questions for more information:
130 https://cryptography.io/en/latest/faq.html
131 =============================DEBUG ASSISTANCE=============================
132 """
133 )
134 raise
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,7 +72,7 @@
setup_requires=setup_requirements,
extras_require={
"test": [
- "pytest>=4.4.0",
+ "pytest>=6.0",
"pytest-cov",
"pytest-subtests",
"pytest-xdist",
verification_info:
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,7 +72,7 @@\n setup_requires=setup_requirements,\n extras_require={\n \"test\": [\n- \"pytest>=4.4.0\",\n+ \"pytest>=6.0\",\n \"pytest-cov\",\n \"pytest-subtests\",\n \"pytest-xdist\",\n", "issue": "Testing with tar bundle complains about unknown pytest markers\nThe source bundle ``cryptography-3.3.1.tar.gz`` does not include tox.ini. The ini file is also used to define pytest markers and pytest configuration:\r\n\r\n```\r\n[pytest]\r\naddopts = -r s\r\nmarkers =\r\n requires_backend_interface: this test requires a specific backend interface\r\n skip_fips: this test is not executed in FIPS mode\r\n supported: parametrized test requiring only_if and skip_message\r\n wycheproof_tests: this test runs a wycheproof fixture\r\n\r\n```\r\n\r\npytest complains about unknown markers. Downstream tests with the source bundle is flooding the log file with warnings like:\r\n\r\n```\r\ntests/test_fernet.py:43\r\n /builddir/build/BUILD/cryptography-3.3.1/tests/test_fernet.py:43: PytestUnknownMarkWarning: Unknown pytest.mark.requires_backend_interface - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/stable/mark.html\r\n @pytest.mark.requires_backend_interface(interface=CipherBackend)\r\n```\r\n\r\nSuggested solutions:\r\n\r\n* ship ``tox.ini`` in source distribution\r\n* Move marker definitions to ``pytest.ini`` and ship that file\r\n* Move marker definitions to ``pyproject.toml``\r\n* Define markers on conftest.py\r\n\r\n```\r\ndef pytest_configure(config):\r\n config.addinivalue_line(\"markers\", \"requires_backend_interface: this test requires a specific backend interface\")\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\n# `setup_requirements` must be kept in sync with `pyproject.toml`\nsetup_requirements = [\"cffi>=1.12\"]\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\ntry:\n setup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n description=about[\"__summary__\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]\n ),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=setup_requirements,\n setup_requires=setup_requirements,\n extras_require={\n \"test\": [\n \"pytest>=4.4.0\",\n \"pytest-cov\",\n \"pytest-subtests\",\n \"pytest-xdist\",\n \"pretend\",\n \"iso8601\",\n \"pytz\",\n \"hypothesis>=1.11.4,!=3.79.2\",\n ],\n \"docs\": [\n \"sphinx >= 1.6.5,!=1.8.0,!=3.1.0,!=3.1.1\",\n \"sphinx_rtd_theme\",\n ],\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"twine >= 1.12.0\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n \"black\",\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n # This extra is for OpenSSH private keys that use bcrypt KDF\n # Versions: v3.1.3 - ignore_few_rounds, v3.1.5 - abi3\n \"ssh\": [\"bcrypt >= 3.1.5\"],\n },\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. 
setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation.html for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq.html\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\n# `setup_requirements` must be kept in sync with `pyproject.toml`\nsetup_requirements = [\"cffi>=1.12\"]\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\ntry:\n setup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n description=about[\"__summary__\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]\n ),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=setup_requirements,\n setup_requires=setup_requirements,\n extras_require={\n \"test\": [\n \"pytest>=6.0\",\n \"pytest-cov\",\n \"pytest-subtests\",\n \"pytest-xdist\",\n \"pretend\",\n \"iso8601\",\n \"pytz\",\n \"hypothesis>=1.11.4,!=3.79.2\",\n ],\n \"docs\": [\n \"sphinx >= 1.6.5,!=1.8.0,!=3.1.0,!=3.1.1\",\n \"sphinx_rtd_theme\",\n 
],\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"twine >= 1.12.0\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n \"black\",\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n # This extra is for OpenSSH private keys that use bcrypt KDF\n # Versions: v3.1.3 - ignore_few_rounds, v3.1.5 - abi3\n \"ssh\": [\"bcrypt >= 3.1.5\"],\n },\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation.html for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq.html\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}]}
num_tokens: 2,061 | num_tokens_diff: 86
problem_id: gh_patches_debug_12191 | source: rasdani/github-patches | task_type: git_diff | in_source_id: readthedocs__readthedocs.org-1407
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migration failing on version slugs
Running migrations on #1396, I hit the following error:
```
Traceback (most recent call last):
File "/home/docs/bin/django-admin.py", line 5, in <module>
management.execute_from_command_line()
File "/home/docs/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 399, in execute_from_command_li
ne
utility.execute()
File "/home/docs/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 392, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/docs/local/lib/python2.7/site-packages/django/core/management/base.py", line 242, in run_from_argv
self.execute(*args, **options.__dict__)
File "/home/docs/local/lib/python2.7/site-packages/django/core/management/base.py", line 285, in execute
output = self.handle(*args, **options)
File "/home/docs/local/lib/python2.7/site-packages/south/management/commands/migrate.py", line 111, in handle
ignore_ghosts = ignore_ghosts,
File "/home/docs/local/lib/python2.7/site-packages/south/migration/__init__.py", line 220, in migrate_app
success = migrator.migrate_many(target, workplan, database)
File "/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py", line 256, in migrate_many
result = migrator.__class__.migrate_many(migrator, target, migrations, database)
File "/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py", line 331, in migrate_many
result = self.migrate(migration, database)
File "/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py", line 133, in migrate
result = self.run(migration, database)
File "/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py", line 114, in run
return self.run_migration(migration, database)
File "/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py", line 84, in run_migration
migration_function()
File "/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py", line 60, in <lambda>
return (lambda: direction(orm))
File "/home/docs/checkouts/readthedocs.org/readthedocs/builds/migrations/0024_fix_slugs_with_leading_placeholders.py", line
21, in forwards
version.slug = slug_field.create_slug(version)
File "/home/docs/checkouts/readthedocs.org/readthedocs/builds/version_slug.py", line 145, in create_slug
'Invalid generated slug: {slug}'.format(slug=slug))
AssertionError: Invalid generated slug: v
```
cc @gregmuellegger
--- END ISSUE ---
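The assertion fires because the validation pattern in `readthedocs/builds/version_slug.py` (shown below) requires at least two characters: `[a-z0-9]` consumes one and `[-._a-z0-9]+?` demands at least one more, so a one-letter slug such as `v` can never match. A quick standalone reproduction, independent of Django and the migration:

```python
import re

# Pattern as currently defined in readthedocs/builds/version_slug.py
VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]+?)'
test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))

print(bool(test_pattern.match('v')))       # False -> the AssertionError in create_slug
print(bool(test_pattern.match('v1')))      # True
print(bool(test_pattern.match('latest')))  # True
```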
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/builds/version_slug.py`
Content:
```
1 """Contains logic for handling version slugs.
2
3 Handling slugs for versions is not too straightforward. We need to allow some
4 characters which are uncommon in usual slugs. They are dots and underscores.
5 Usually we want the slug to be the name of the tag or branch corresponding VCS
6 version. However we need to strip url-destroying characters like slashes.
7
8 So the syntax for version slugs should be:
9
10 * Start with a lowercase ascii char or a digit.
11 * All other characters must be lowercase ascii chars, digits or dots.
12
13 If uniqueness is not met for a slug in a project, we append a dash and a letter
14 starting with ``a``. We keep increasing that letter until we have a unique
15 slug. This is used since using numbers in tags is too common and appending
16 another number would be confusing.
17 """
18
19 import math
20 import re
21 import string
22 from operator import truediv
23 from django.db import models
24 from django.utils.encoding import force_text
25
26
27 # Regex breakdown:
28 # [a-z0-9] -- start with alphanumeric value
29 # [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii
30 # +? -- allow multiple of those, but be not greedy about the matching
31 # (?: ... ) -- wrap everything so that the pattern cannot escape when used in
32 # regexes.
33 VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]+?)'
34
35
36 class VersionSlugField(models.CharField):
37 """
38 Implementation inspired by ``django_extensions.db.fields.AutoSlugField``.
39 """
40
41 invalid_chars_re = re.compile('[^-._a-z0-9]')
42 leading_punctuation_re = re.compile('^[-._]+')
43 placeholder = '-'
44 fallback_slug = 'unknown'
45 test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))
46
47 def __init__(self, *args, **kwargs):
48 kwargs.setdefault('db_index', True)
49
50 populate_from = kwargs.pop('populate_from', None)
51 if populate_from is None:
52 raise ValueError("missing 'populate_from' argument")
53 else:
54 self._populate_from = populate_from
55 super(VersionSlugField, self).__init__(*args, **kwargs)
56
57 def get_queryset(self, model_cls, slug_field):
58 for field, model in model_cls._meta.get_fields_with_model():
59 if model and field == slug_field:
60 return model._default_manager.all()
61 return model_cls._default_manager.all()
62
63 def slugify(self, content):
64 if not content:
65 return ''
66
67 slugified = content.lower()
68 slugified = self.invalid_chars_re.sub(self.placeholder, slugified)
69 slugified = self.leading_punctuation_re.sub('', slugified)
70
71 if not slugified:
72 return self.fallback_slug
73 return slugified
74
75 def uniquifying_suffix(self, iteration):
76 """
77 This creates a suffix based on the number given as ``iteration``. It
78 will return a value encoded as lowercase ascii letter. So we have an
79 alphabet of 26 letters. The returned suffix will be for example ``_yh``
80 where ``yh`` is the encoding of ``iteration``. The length of it will be
81 ``math.log(iteration, 26)``.
82
83 Examples::
84
85 uniquifying_suffix(0) == '_a'
86 uniquifying_suffix(25) == '_z'
87 uniquifying_suffix(26) == '_ba'
88 uniquifying_suffix(52) == '_ca'
89 """
90 alphabet = string.lowercase
91 length = len(alphabet)
92 if iteration == 0:
93 power = 0
94 else:
95 power = int(math.log(iteration, length))
96 current = iteration
97 suffix = ''
98 for exp in reversed(range(0, power + 1)):
99 digit = int(truediv(current, length ** exp))
100 suffix += alphabet[digit]
101 current = current % length ** exp
102 return '_{suffix}'.format(suffix=suffix)
103
104 def create_slug(self, model_instance):
105 # get fields to populate from and slug field to set
106 slug_field = model_instance._meta.get_field(self.attname)
107
108 slug = self.slugify(getattr(model_instance, self._populate_from))
109 next = 0
110
111 # strip slug depending on max_length attribute of the slug field
112 # and clean-up
113 slug_len = slug_field.max_length
114 if slug_len:
115 slug = slug[:slug_len]
116 original_slug = slug
117
118 # exclude the current model instance from the queryset used in finding
119 # the next valid slug
120 queryset = self.get_queryset(model_instance.__class__, slug_field)
121 if model_instance.pk:
122 queryset = queryset.exclude(pk=model_instance.pk)
123
124 # form a kwarg dict used to impliment any unique_together contraints
125 kwargs = {}
126 for params in model_instance._meta.unique_together:
127 if self.attname in params:
128 for param in params:
129 kwargs[param] = getattr(model_instance, param, None)
130 kwargs[self.attname] = slug
131
132 # increases the number while searching for the next valid slug
133 # depending on the given slug, clean-up
134 while not slug or queryset.filter(**kwargs):
135 slug = original_slug
136 end = self.uniquifying_suffix(next)
137 end_len = len(end)
138 if slug_len and len(slug) + end_len > slug_len:
139 slug = slug[:slug_len - end_len]
140 slug = slug + end
141 kwargs[self.attname] = slug
142 next += 1
143
144 assert self.test_pattern.match(slug), (
145 'Invalid generated slug: {slug}'.format(slug=slug))
146 return slug
147
148 def pre_save(self, model_instance, add):
149 value = getattr(model_instance, self.attname)
150 # We only create a new slug if none was set yet.
151 if not value and add:
152 value = force_text(self.create_slug(model_instance))
153 setattr(model_instance, self.attname, value)
154 return value
155
156 def south_field_triple(self):
157 "Returns a suitable description of this field for South."
158 from south.modelsinspector import introspector
159 field_class = '%s.VersionSlugField' % self.__module__
160 args, kwargs = introspector(self)
161 kwargs.update({
162 'populate_from': repr(self._populate_from),
163 })
164 return (field_class, args, kwargs)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/readthedocs/builds/version_slug.py b/readthedocs/builds/version_slug.py
--- a/readthedocs/builds/version_slug.py
+++ b/readthedocs/builds/version_slug.py
@@ -27,10 +27,10 @@
# Regex breakdown:
# [a-z0-9] -- start with alphanumeric value
# [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii
-# +? -- allow multiple of those, but be not greedy about the matching
+# *? -- allow multiple of those, but be not greedy about the matching
# (?: ... ) -- wrap everything so that the pattern cannot escape when used in
# regexes.
-VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]+?)'
+VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]*?)'
class VersionSlugField(models.CharField):
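Relaxing `+?` to `*?` lets the leading character stand on its own, so single-character slugs like `v` now pass `test_pattern` and `create_slug` no longer trips the assertion. A quick check of the patched pattern (standalone, no Django required):

```python
import re

# Patched pattern from the diff above: '*?' permits a one-character slug.
VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]*?)'
test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))

for slug in ['v', '1', 'v1.0', 'latest']:
    assert test_pattern.match(slug), slug
print("all slugs valid")
```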
verification_info:
{"golden_diff": "diff --git a/readthedocs/builds/version_slug.py b/readthedocs/builds/version_slug.py\n--- a/readthedocs/builds/version_slug.py\n+++ b/readthedocs/builds/version_slug.py\n@@ -27,10 +27,10 @@\n # Regex breakdown:\n # [a-z0-9] -- start with alphanumeric value\n # [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii\n-# +? -- allow multiple of those, but be not greedy about the matching\n+# *? -- allow multiple of those, but be not greedy about the matching\n # (?: ... ) -- wrap everything so that the pattern cannot escape when used in\n # regexes.\n-VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]+?)'\n+VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]*?)'\n \n \n class VersionSlugField(models.CharField):\n", "issue": "Migration failing on version slugs\nRunning migrations on #1396, I hit the following error:\n\n```\nTraceback (most recent call last):\n File \"/home/docs/bin/django-admin.py\", line 5, in <module>\n management.execute_from_command_line()\n File \"/home/docs/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 399, in execute_from_command_li\nne\n utility.execute()\n File \"/home/docs/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 392, in execute\n self.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/home/docs/local/lib/python2.7/site-packages/django/core/management/base.py\", line 242, in run_from_argv\n self.execute(*args, **options.__dict__)\n File \"/home/docs/local/lib/python2.7/site-packages/django/core/management/base.py\", line 285, in execute\n output = self.handle(*args, **options)\n File \"/home/docs/local/lib/python2.7/site-packages/south/management/commands/migrate.py\", line 111, in handle\n ignore_ghosts = ignore_ghosts,\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/__init__.py\", line 220, in migrate_app\n success = migrator.migrate_many(target, workplan, database)\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py\", line 256, in migrate_many\n result = migrator.__class__.migrate_many(migrator, target, migrations, database)\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py\", line 331, in migrate_many\n result = self.migrate(migration, database)\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py\", line 133, in migrate\n result = self.run(migration, database)\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py\", line 114, in run\n return self.run_migration(migration, database)\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py\", line 84, in run_migration\n migration_function()\n File \"/home/docs/local/lib/python2.7/site-packages/south/migration/migrators.py\", line 60, in <lambda>\n return (lambda: direction(orm))\n File \"/home/docs/checkouts/readthedocs.org/readthedocs/builds/migrations/0024_fix_slugs_with_leading_placeholders.py\", line\n21, in forwards\n version.slug = slug_field.create_slug(version)\n File \"/home/docs/checkouts/readthedocs.org/readthedocs/builds/version_slug.py\", line 145, in create_slug\n 'Invalid generated slug: {slug}'.format(slug=slug))\nAssertionError: Invalid generated slug: v\n```\n\ncc @gregmuellegger \n\n", "before_files": [{"content": "\"\"\"Contains logic for handling version slugs.\n\nHandling slugs for versions is not too straightforward. We need to allow some\ncharacters which are uncommon in usual slugs. 
They are dots and underscores.\nUsually we want the slug to be the name of the tag or branch corresponding VCS\nversion. However we need to strip url-destroying characters like slashes.\n\nSo the syntax for version slugs should be:\n\n* Start with a lowercase ascii char or a digit.\n* All other characters must be lowercase ascii chars, digits or dots.\n\nIf uniqueness is not met for a slug in a project, we append a dash and a letter\nstarting with ``a``. We keep increasing that letter until we have a unique\nslug. This is used since using numbers in tags is too common and appending\nanother number would be confusing.\n\"\"\"\n\nimport math\nimport re\nimport string\nfrom operator import truediv\nfrom django.db import models\nfrom django.utils.encoding import force_text\n\n\n# Regex breakdown:\n# [a-z0-9] -- start with alphanumeric value\n# [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii\n# +? -- allow multiple of those, but be not greedy about the matching\n# (?: ... ) -- wrap everything so that the pattern cannot escape when used in\n# regexes.\nVERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]+?)'\n\n\nclass VersionSlugField(models.CharField):\n \"\"\"\n Implementation inspired by ``django_extensions.db.fields.AutoSlugField``.\n \"\"\"\n\n invalid_chars_re = re.compile('[^-._a-z0-9]')\n leading_punctuation_re = re.compile('^[-._]+')\n placeholder = '-'\n fallback_slug = 'unknown'\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n super(VersionSlugField, self).__init__(*args, **kwargs)\n\n def get_queryset(self, model_cls, slug_field):\n for field, model in model_cls._meta.get_fields_with_model():\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def slugify(self, content):\n if not content:\n return ''\n\n slugified = content.lower()\n slugified = self.invalid_chars_re.sub(self.placeholder, slugified)\n slugified = self.leading_punctuation_re.sub('', slugified)\n\n if not slugified:\n return self.fallback_slug\n return slugified\n\n def uniquifying_suffix(self, iteration):\n \"\"\"\n This creates a suffix based on the number given as ``iteration``. It\n will return a value encoded as lowercase ascii letter. So we have an\n alphabet of 26 letters. The returned suffix will be for example ``_yh``\n where ``yh`` is the encoding of ``iteration``. 
The length of it will be\n ``math.log(iteration, 26)``.\n\n Examples::\n\n uniquifying_suffix(0) == '_a'\n uniquifying_suffix(25) == '_z'\n uniquifying_suffix(26) == '_ba'\n uniquifying_suffix(52) == '_ca'\n \"\"\"\n alphabet = string.lowercase\n length = len(alphabet)\n if iteration == 0:\n power = 0\n else:\n power = int(math.log(iteration, length))\n current = iteration\n suffix = ''\n for exp in reversed(range(0, power + 1)):\n digit = int(truediv(current, length ** exp))\n suffix += alphabet[digit]\n current = current % length ** exp\n return '_{suffix}'.format(suffix=suffix)\n\n def create_slug(self, model_instance):\n # get fields to populate from and slug field to set\n slug_field = model_instance._meta.get_field(self.attname)\n\n slug = self.slugify(getattr(model_instance, self._populate_from))\n next = 0\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n original_slug = slug\n\n # exclude the current model instance from the queryset used in finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to impliment any unique_together contraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs):\n slug = original_slug\n end = self.uniquifying_suffix(next)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = slug + end\n kwargs[self.attname] = slug\n next += 1\n\n assert self.test_pattern.match(slug), (\n 'Invalid generated slug: {slug}'.format(slug=slug))\n return slug\n\n def pre_save(self, model_instance, add):\n value = getattr(model_instance, self.attname)\n # We only create a new slug if none was set yet.\n if not value and add:\n value = force_text(self.create_slug(model_instance))\n setattr(model_instance, self.attname, value)\n return value\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n from south.modelsinspector import introspector\n field_class = '%s.VersionSlugField' % self.__module__\n args, kwargs = introspector(self)\n kwargs.update({\n 'populate_from': repr(self._populate_from),\n })\n return (field_class, args, kwargs)\n", "path": "readthedocs/builds/version_slug.py"}], "after_files": [{"content": "\"\"\"Contains logic for handling version slugs.\n\nHandling slugs for versions is not too straightforward. We need to allow some\ncharacters which are uncommon in usual slugs. They are dots and underscores.\nUsually we want the slug to be the name of the tag or branch corresponding VCS\nversion. However we need to strip url-destroying characters like slashes.\n\nSo the syntax for version slugs should be:\n\n* Start with a lowercase ascii char or a digit.\n* All other characters must be lowercase ascii chars, digits or dots.\n\nIf uniqueness is not met for a slug in a project, we append a dash and a letter\nstarting with ``a``. We keep increasing that letter until we have a unique\nslug. 
This is used since using numbers in tags is too common and appending\nanother number would be confusing.\n\"\"\"\n\nimport math\nimport re\nimport string\nfrom operator import truediv\nfrom django.db import models\nfrom django.utils.encoding import force_text\n\n\n# Regex breakdown:\n# [a-z0-9] -- start with alphanumeric value\n# [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii\n# *? -- allow multiple of those, but be not greedy about the matching\n# (?: ... ) -- wrap everything so that the pattern cannot escape when used in\n# regexes.\nVERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]*?)'\n\n\nclass VersionSlugField(models.CharField):\n \"\"\"\n Implementation inspired by ``django_extensions.db.fields.AutoSlugField``.\n \"\"\"\n\n invalid_chars_re = re.compile('[^-._a-z0-9]')\n leading_punctuation_re = re.compile('^[-._]+')\n placeholder = '-'\n fallback_slug = 'unknown'\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n super(VersionSlugField, self).__init__(*args, **kwargs)\n\n def get_queryset(self, model_cls, slug_field):\n for field, model in model_cls._meta.get_fields_with_model():\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def slugify(self, content):\n if not content:\n return ''\n\n slugified = content.lower()\n slugified = self.invalid_chars_re.sub(self.placeholder, slugified)\n slugified = self.leading_punctuation_re.sub('', slugified)\n\n if not slugified:\n return self.fallback_slug\n return slugified\n\n def uniquifying_suffix(self, iteration):\n \"\"\"\n This creates a suffix based on the number given as ``iteration``. It\n will return a value encoded as lowercase ascii letter. So we have an\n alphabet of 26 letters. The returned suffix will be for example ``_yh``\n where ``yh`` is the encoding of ``iteration``. 
The length of it will be\n ``math.log(iteration, 26)``.\n\n Examples::\n\n uniquifying_suffix(0) == '_a'\n uniquifying_suffix(25) == '_z'\n uniquifying_suffix(26) == '_ba'\n uniquifying_suffix(52) == '_ca'\n \"\"\"\n alphabet = string.lowercase\n length = len(alphabet)\n if iteration == 0:\n power = 0\n else:\n power = int(math.log(iteration, length))\n current = iteration\n suffix = ''\n for exp in reversed(range(0, power + 1)):\n digit = int(truediv(current, length ** exp))\n suffix += alphabet[digit]\n current = current % length ** exp\n return '_{suffix}'.format(suffix=suffix)\n\n def create_slug(self, model_instance):\n # get fields to populate from and slug field to set\n slug_field = model_instance._meta.get_field(self.attname)\n\n slug = self.slugify(getattr(model_instance, self._populate_from))\n next = 0\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n original_slug = slug\n\n # exclude the current model instance from the queryset used in finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to impliment any unique_together contraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs):\n slug = original_slug\n end = self.uniquifying_suffix(next)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = slug + end\n kwargs[self.attname] = slug\n next += 1\n\n assert self.test_pattern.match(slug), (\n 'Invalid generated slug: {slug}'.format(slug=slug))\n return slug\n\n def pre_save(self, model_instance, add):\n value = getattr(model_instance, self.attname)\n # We only create a new slug if none was set yet.\n if not value and add:\n value = force_text(self.create_slug(model_instance))\n setattr(model_instance, self.attname, value)\n return value\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n from south.modelsinspector import introspector\n field_class = '%s.VersionSlugField' % self.__module__\n args, kwargs = introspector(self)\n kwargs.update({\n 'populate_from': repr(self._populate_from),\n })\n return (field_class, args, kwargs)\n", "path": "readthedocs/builds/version_slug.py"}]}
| 2,715 | 217 |
gh_patches_debug_28596
|
rasdani/github-patches
|
git_diff
|
pypa__pip-3331
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Emit a warning when pip is used on Python 2.6
Python 2.6 is unmaintained by the Python core team, so we should encourage users to upgrade.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/utils/deprecation.py`
Content:
```
1 """
2 A module that implments tooling to enable easy warnings about deprecations.
3 """
4 from __future__ import absolute_import
5
6 import logging
7 import warnings
8
9
10 class PipDeprecationWarning(Warning):
11 pass
12
13
14 class RemovedInPip9Warning(PipDeprecationWarning, DeprecationWarning):
15 pass
16
17
18 class RemovedInPip10Warning(PipDeprecationWarning, PendingDeprecationWarning):
19 pass
20
21
22 DEPRECATIONS = [RemovedInPip9Warning, RemovedInPip10Warning]
23
24
25 # Warnings <-> Logging Integration
26
27
28 _warnings_showwarning = None
29
30
31 def _showwarning(message, category, filename, lineno, file=None, line=None):
32 if file is not None:
33 if _warnings_showwarning is not None:
34 _warnings_showwarning(
35 message, category, filename, lineno, file, line,
36 )
37 else:
38 if issubclass(category, PipDeprecationWarning):
39 # We use a specially named logger which will handle all of the
40 # deprecation messages for pip.
41 logger = logging.getLogger("pip.deprecations")
42
43 # This is purposely using the % formatter here instead of letting
44 # the logging module handle the interpolation. This is because we
45 # want it to appear as if someone typed this entire message out.
46 log_message = "DEPRECATION: %s" % message
47
48 # Things that are DeprecationWarnings will be removed in the very
49 # next version of pip. We want these to be more obvious so we
50 # use the ERROR logging level while the PendingDeprecationWarnings
51 # are still have at least 2 versions to go until they are removed
52 # so they can just be warnings.
53 if issubclass(category, DeprecationWarning):
54 logger.error(log_message)
55 else:
56 logger.warning(log_message)
57 else:
58 _warnings_showwarning(
59 message, category, filename, lineno, file, line,
60 )
61
62
63 def install_warning_logger():
64 global _warnings_showwarning
65
66 if _warnings_showwarning is None:
67 _warnings_showwarning = warnings.showwarning
68 warnings.showwarning = _showwarning
69
```
Path: `pip/basecommand.py`
Content:
```
1 """Base Command class, and related routines"""
2 from __future__ import absolute_import
3
4 import logging
5 import os
6 import sys
7 import optparse
8
9 from pip import cmdoptions
10 from pip.index import PackageFinder
11 from pip.locations import running_under_virtualenv
12 from pip.download import PipSession
13 from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
14 CommandError, PreviousBuildDirError)
15
16 from pip.compat import logging_dictConfig
17 from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
18 from pip.req import InstallRequirement, parse_requirements
19 from pip.status_codes import (
20 SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
21 PREVIOUS_BUILD_DIR_ERROR,
22 )
23 from pip.utils import get_prog, normalize_path
24 from pip.utils.logging import IndentingFormatter
25 from pip.utils.outdated import pip_version_check
26
27
28 __all__ = ['Command']
29
30
31 logger = logging.getLogger(__name__)
32
33
34 class Command(object):
35 name = None
36 usage = None
37 hidden = False
38 log_streams = ("ext://sys.stdout", "ext://sys.stderr")
39
40 def __init__(self, isolated=False):
41 parser_kw = {
42 'usage': self.usage,
43 'prog': '%s %s' % (get_prog(), self.name),
44 'formatter': UpdatingDefaultsHelpFormatter(),
45 'add_help_option': False,
46 'name': self.name,
47 'description': self.__doc__,
48 'isolated': isolated,
49 }
50
51 self.parser = ConfigOptionParser(**parser_kw)
52
53 # Commands should add options to this option group
54 optgroup_name = '%s Options' % self.name.capitalize()
55 self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
56
57 # Add the general options
58 gen_opts = cmdoptions.make_option_group(
59 cmdoptions.general_group,
60 self.parser,
61 )
62 self.parser.add_option_group(gen_opts)
63
64 def _build_session(self, options, retries=None, timeout=None):
65 session = PipSession(
66 cache=(
67 normalize_path(os.path.join(options.cache_dir, "http"))
68 if options.cache_dir else None
69 ),
70 retries=retries if retries is not None else options.retries,
71 insecure_hosts=options.trusted_hosts,
72 )
73
74 # Handle custom ca-bundles from the user
75 if options.cert:
76 session.verify = options.cert
77
78 # Handle SSL client certificate
79 if options.client_cert:
80 session.cert = options.client_cert
81
82 # Handle timeouts
83 if options.timeout or timeout:
84 session.timeout = (
85 timeout if timeout is not None else options.timeout
86 )
87
88 # Handle configured proxies
89 if options.proxy:
90 session.proxies = {
91 "http": options.proxy,
92 "https": options.proxy,
93 }
94
95 # Determine if we can prompt the user for authentication or not
96 session.auth.prompting = not options.no_input
97
98 return session
99
100 def parse_args(self, args):
101 # factored out for testability
102 return self.parser.parse_args(args)
103
104 def main(self, args):
105 options, args = self.parse_args(args)
106
107 if options.quiet:
108 if options.quiet == 1:
109 level = "WARNING"
110 if options.quiet == 2:
111 level = "ERROR"
112 else:
113 level = "CRITICAL"
114 elif options.verbose:
115 level = "DEBUG"
116 else:
117 level = "INFO"
118
119 logging_dictConfig({
120 "version": 1,
121 "disable_existing_loggers": False,
122 "filters": {
123 "exclude_warnings": {
124 "()": "pip.utils.logging.MaxLevelFilter",
125 "level": logging.WARNING,
126 },
127 },
128 "formatters": {
129 "indent": {
130 "()": IndentingFormatter,
131 "format": "%(message)s",
132 },
133 },
134 "handlers": {
135 "console": {
136 "level": level,
137 "class": "pip.utils.logging.ColorizedStreamHandler",
138 "stream": self.log_streams[0],
139 "filters": ["exclude_warnings"],
140 "formatter": "indent",
141 },
142 "console_errors": {
143 "level": "WARNING",
144 "class": "pip.utils.logging.ColorizedStreamHandler",
145 "stream": self.log_streams[1],
146 "formatter": "indent",
147 },
148 "user_log": {
149 "level": "DEBUG",
150 "class": "pip.utils.logging.BetterRotatingFileHandler",
151 "filename": options.log or "/dev/null",
152 "delay": True,
153 "formatter": "indent",
154 },
155 },
156 "root": {
157 "level": level,
158 "handlers": list(filter(None, [
159 "console",
160 "console_errors",
161 "user_log" if options.log else None,
162 ])),
163 },
164 # Disable any logging besides WARNING unless we have DEBUG level
165 # logging enabled. These use both pip._vendor and the bare names
166 # for the case where someone unbundles our libraries.
167 "loggers": dict(
168 (
169 name,
170 {
171 "level": (
172 "WARNING"
173 if level in ["INFO", "ERROR"]
174 else "DEBUG"
175 ),
176 },
177 )
178 for name in ["pip._vendor", "distlib", "requests", "urllib3"]
179 ),
180 })
181
182 # TODO: try to get these passing down from the command?
183 # without resorting to os.environ to hold these.
184
185 if options.no_input:
186 os.environ['PIP_NO_INPUT'] = '1'
187
188 if options.exists_action:
189 os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
190
191 if options.require_venv:
192 # If a venv is required check if it can really be found
193 if not running_under_virtualenv():
194 logger.critical(
195 'Could not find an activated virtualenv (required).'
196 )
197 sys.exit(VIRTUALENV_NOT_FOUND)
198
199 try:
200 status = self.run(options, args)
201 # FIXME: all commands should return an exit status
202 # and when it is done, isinstance is not needed anymore
203 if isinstance(status, int):
204 return status
205 except PreviousBuildDirError as exc:
206 logger.critical(str(exc))
207 logger.debug('Exception information:', exc_info=True)
208
209 return PREVIOUS_BUILD_DIR_ERROR
210 except (InstallationError, UninstallationError, BadCommand) as exc:
211 logger.critical(str(exc))
212 logger.debug('Exception information:', exc_info=True)
213
214 return ERROR
215 except CommandError as exc:
216 logger.critical('ERROR: %s', exc)
217 logger.debug('Exception information:', exc_info=True)
218
219 return ERROR
220 except KeyboardInterrupt:
221 logger.critical('Operation cancelled by user')
222 logger.debug('Exception information:', exc_info=True)
223
224 return ERROR
225 except:
226 logger.critical('Exception:', exc_info=True)
227
228 return UNKNOWN_ERROR
229 finally:
230 # Check if we're using the latest version of pip available
231 if (not options.disable_pip_version_check and not
232 getattr(options, "no_index", False)):
233 with self._build_session(
234 options,
235 retries=0,
236 timeout=min(5, options.timeout)) as session:
237 pip_version_check(session)
238
239 return SUCCESS
240
241
242 class RequirementCommand(Command):
243
244 @staticmethod
245 def populate_requirement_set(requirement_set, args, options, finder,
246 session, name, wheel_cache):
247 """
248 Marshal cmd line args into a requirement set.
249 """
250 for filename in options.constraints:
251 for req in parse_requirements(
252 filename,
253 constraint=True, finder=finder, options=options,
254 session=session, wheel_cache=wheel_cache):
255 requirement_set.add_requirement(req)
256
257 for req in args:
258 requirement_set.add_requirement(
259 InstallRequirement.from_line(
260 req, None, isolated=options.isolated_mode,
261 wheel_cache=wheel_cache
262 )
263 )
264
265 for req in options.editables:
266 requirement_set.add_requirement(
267 InstallRequirement.from_editable(
268 req,
269 default_vcs=options.default_vcs,
270 isolated=options.isolated_mode,
271 wheel_cache=wheel_cache
272 )
273 )
274
275 found_req_in_file = False
276 for filename in options.requirements:
277 for req in parse_requirements(
278 filename,
279 finder=finder, options=options, session=session,
280 wheel_cache=wheel_cache):
281 found_req_in_file = True
282 requirement_set.add_requirement(req)
283 # If --require-hashes was a line in a requirements file, tell
284 # RequirementSet about it:
285 requirement_set.require_hashes = options.require_hashes
286
287 if not (args or options.editables or found_req_in_file):
288 opts = {'name': name}
289 if options.find_links:
290 msg = ('You must give at least one requirement to '
291 '%(name)s (maybe you meant "pip %(name)s '
292 '%(links)s"?)' %
293 dict(opts, links=' '.join(options.find_links)))
294 else:
295 msg = ('You must give at least one requirement '
296 'to %(name)s (see "pip help %(name)s")' % opts)
297 logger.warning(msg)
298
299 def _build_package_finder(self, options, session):
300 """
301 Create a package finder appropriate to this requirement command.
302 """
303 index_urls = [options.index_url] + options.extra_index_urls
304 if options.no_index:
305 logger.info('Ignoring indexes: %s', ','.join(index_urls))
306 index_urls = []
307
308 return PackageFinder(
309 find_links=options.find_links,
310 format_control=options.format_control,
311 index_urls=index_urls,
312 trusted_hosts=options.trusted_hosts,
313 allow_all_prereleases=options.pre,
314 process_dependency_links=options.process_dependency_links,
315 session=session,
316 )
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pip/basecommand.py b/pip/basecommand.py
--- a/pip/basecommand.py
+++ b/pip/basecommand.py
@@ -5,6 +5,7 @@
import os
import sys
import optparse
+import warnings
from pip import cmdoptions
from pip.index import PackageFinder
@@ -20,7 +21,7 @@
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
-from pip.utils import get_prog, normalize_path
+from pip.utils import deprecation, get_prog, normalize_path
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
@@ -179,6 +180,14 @@
),
})
+ if sys.version_info[:2] == (2, 6):
+ warnings.warn(
+ "Python 2.6 is no longer supported by the Python core team, "
+ "please upgrade your Python. A future version of pip will "
+ "drop support for Python 2.6",
+ deprecation.Python26DeprecationWarning
+ )
+
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
diff --git a/pip/utils/deprecation.py b/pip/utils/deprecation.py
--- a/pip/utils/deprecation.py
+++ b/pip/utils/deprecation.py
@@ -19,7 +19,15 @@
pass
-DEPRECATIONS = [RemovedInPip9Warning, RemovedInPip10Warning]
+class Python26DeprecationWarning(
+ PipDeprecationWarning, PendingDeprecationWarning
+):
+ pass
+
+
+DEPRECATIONS = [
+ RemovedInPip9Warning, RemovedInPip10Warning, Python26DeprecationWarning
+]
# Warnings <-> Logging Integration
|
{"golden_diff": "diff --git a/pip/basecommand.py b/pip/basecommand.py\n--- a/pip/basecommand.py\n+++ b/pip/basecommand.py\n@@ -5,6 +5,7 @@\n import os\n import sys\n import optparse\n+import warnings\n \n from pip import cmdoptions\n from pip.index import PackageFinder\n@@ -20,7 +21,7 @@\n SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,\n PREVIOUS_BUILD_DIR_ERROR,\n )\n-from pip.utils import get_prog, normalize_path\n+from pip.utils import deprecation, get_prog, normalize_path\n from pip.utils.logging import IndentingFormatter\n from pip.utils.outdated import pip_version_check\n \n@@ -179,6 +180,14 @@\n ),\n })\n \n+ if sys.version_info[:2] == (2, 6):\n+ warnings.warn(\n+ \"Python 2.6 is no longer supported by the Python core team, \"\n+ \"please upgrade your Python. A future version of pip will \"\n+ \"drop support for Python 2.6\",\n+ deprecation.Python26DeprecationWarning\n+ )\n+\n # TODO: try to get these passing down from the command?\n # without resorting to os.environ to hold these.\n \ndiff --git a/pip/utils/deprecation.py b/pip/utils/deprecation.py\n--- a/pip/utils/deprecation.py\n+++ b/pip/utils/deprecation.py\n@@ -19,7 +19,15 @@\n pass\n \n \n-DEPRECATIONS = [RemovedInPip9Warning, RemovedInPip10Warning]\n+class Python26DeprecationWarning(\n+ PipDeprecationWarning, PendingDeprecationWarning\n+):\n+ pass\n+\n+\n+DEPRECATIONS = [\n+ RemovedInPip9Warning, RemovedInPip10Warning, Python26DeprecationWarning\n+]\n \n \n # Warnings <-> Logging Integration\n", "issue": "Emit a warning when pip is used on Python 2.6\nPython 2.6 is unmaintained by the Python core team, we should encourage users to upgrade.\n\n<!-- Reviewable:start -->\n\n[<img src=\"https://reviewable.io/review_button.png\" height=40 alt=\"Review on Reviewable\"/>](https://reviewable.io/reviews/pypa/pip/3329)\n\n<!-- Reviewable:end -->\n\n", "before_files": [{"content": "\"\"\"\nA module that implments tooling to enable easy warnings about deprecations.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\n\nclass PipDeprecationWarning(Warning):\n pass\n\n\nclass RemovedInPip9Warning(PipDeprecationWarning, DeprecationWarning):\n pass\n\n\nclass RemovedInPip10Warning(PipDeprecationWarning, PendingDeprecationWarning):\n pass\n\n\nDEPRECATIONS = [RemovedInPip9Warning, RemovedInPip10Warning]\n\n\n# Warnings <-> Logging Integration\n\n\n_warnings_showwarning = None\n\n\ndef _showwarning(message, category, filename, lineno, file=None, line=None):\n if file is not None:\n if _warnings_showwarning is not None:\n _warnings_showwarning(\n message, category, filename, lineno, file, line,\n )\n else:\n if issubclass(category, PipDeprecationWarning):\n # We use a specially named logger which will handle all of the\n # deprecation messages for pip.\n logger = logging.getLogger(\"pip.deprecations\")\n\n # This is purposely using the % formatter here instead of letting\n # the logging module handle the interpolation. This is because we\n # want it to appear as if someone typed this entire message out.\n log_message = \"DEPRECATION: %s\" % message\n\n # Things that are DeprecationWarnings will be removed in the very\n # next version of pip. 
We want these to be more obvious so we\n # use the ERROR logging level while the PendingDeprecationWarnings\n # are still have at least 2 versions to go until they are removed\n # so they can just be warnings.\n if issubclass(category, DeprecationWarning):\n logger.error(log_message)\n else:\n logger.warning(log_message)\n else:\n _warnings_showwarning(\n message, category, filename, lineno, file, line,\n )\n\n\ndef install_warning_logger():\n global _warnings_showwarning\n\n if _warnings_showwarning is None:\n _warnings_showwarning = warnings.showwarning\n warnings.showwarning = _showwarning\n", "path": "pip/utils/deprecation.py"}, {"content": "\"\"\"Base Command class, and related routines\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nimport sys\nimport optparse\n\nfrom pip import cmdoptions\nfrom pip.index import PackageFinder\nfrom pip.locations import running_under_virtualenv\nfrom pip.download import PipSession\nfrom pip.exceptions import (BadCommand, InstallationError, UninstallationError,\n CommandError, PreviousBuildDirError)\n\nfrom pip.compat import logging_dictConfig\nfrom pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter\nfrom pip.req import InstallRequirement, parse_requirements\nfrom pip.status_codes import (\n SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,\n PREVIOUS_BUILD_DIR_ERROR,\n)\nfrom pip.utils import get_prog, normalize_path\nfrom pip.utils.logging import IndentingFormatter\nfrom pip.utils.outdated import pip_version_check\n\n\n__all__ = ['Command']\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(object):\n name = None\n usage = None\n hidden = False\n log_streams = (\"ext://sys.stdout\", \"ext://sys.stderr\")\n\n def __init__(self, isolated=False):\n parser_kw = {\n 'usage': self.usage,\n 'prog': '%s %s' % (get_prog(), self.name),\n 'formatter': UpdatingDefaultsHelpFormatter(),\n 'add_help_option': False,\n 'name': self.name,\n 'description': self.__doc__,\n 'isolated': isolated,\n }\n\n self.parser = ConfigOptionParser(**parser_kw)\n\n # Commands should add options to this option group\n optgroup_name = '%s Options' % self.name.capitalize()\n self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)\n\n # Add the general options\n gen_opts = cmdoptions.make_option_group(\n cmdoptions.general_group,\n self.parser,\n )\n self.parser.add_option_group(gen_opts)\n\n def _build_session(self, options, retries=None, timeout=None):\n session = PipSession(\n cache=(\n normalize_path(os.path.join(options.cache_dir, \"http\"))\n if options.cache_dir else None\n ),\n retries=retries if retries is not None else options.retries,\n insecure_hosts=options.trusted_hosts,\n )\n\n # Handle custom ca-bundles from the user\n if options.cert:\n session.verify = options.cert\n\n # Handle SSL client certificate\n if options.client_cert:\n session.cert = options.client_cert\n\n # Handle timeouts\n if options.timeout or timeout:\n session.timeout = (\n timeout if timeout is not None else options.timeout\n )\n\n # Handle configured proxies\n if options.proxy:\n session.proxies = {\n \"http\": options.proxy,\n \"https\": options.proxy,\n }\n\n # Determine if we can prompt the user for authentication or not\n session.auth.prompting = not options.no_input\n\n return session\n\n def parse_args(self, args):\n # factored out for testability\n return self.parser.parse_args(args)\n\n def main(self, args):\n options, args = self.parse_args(args)\n\n if options.quiet:\n if options.quiet == 1:\n level = \"WARNING\"\n if 
options.quiet == 2:\n level = \"ERROR\"\n else:\n level = \"CRITICAL\"\n elif options.verbose:\n level = \"DEBUG\"\n else:\n level = \"INFO\"\n\n logging_dictConfig({\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n \"exclude_warnings\": {\n \"()\": \"pip.utils.logging.MaxLevelFilter\",\n \"level\": logging.WARNING,\n },\n },\n \"formatters\": {\n \"indent\": {\n \"()\": IndentingFormatter,\n \"format\": \"%(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": level,\n \"class\": \"pip.utils.logging.ColorizedStreamHandler\",\n \"stream\": self.log_streams[0],\n \"filters\": [\"exclude_warnings\"],\n \"formatter\": \"indent\",\n },\n \"console_errors\": {\n \"level\": \"WARNING\",\n \"class\": \"pip.utils.logging.ColorizedStreamHandler\",\n \"stream\": self.log_streams[1],\n \"formatter\": \"indent\",\n },\n \"user_log\": {\n \"level\": \"DEBUG\",\n \"class\": \"pip.utils.logging.BetterRotatingFileHandler\",\n \"filename\": options.log or \"/dev/null\",\n \"delay\": True,\n \"formatter\": \"indent\",\n },\n },\n \"root\": {\n \"level\": level,\n \"handlers\": list(filter(None, [\n \"console\",\n \"console_errors\",\n \"user_log\" if options.log else None,\n ])),\n },\n # Disable any logging besides WARNING unless we have DEBUG level\n # logging enabled. These use both pip._vendor and the bare names\n # for the case where someone unbundles our libraries.\n \"loggers\": dict(\n (\n name,\n {\n \"level\": (\n \"WARNING\"\n if level in [\"INFO\", \"ERROR\"]\n else \"DEBUG\"\n ),\n },\n )\n for name in [\"pip._vendor\", \"distlib\", \"requests\", \"urllib3\"]\n ),\n })\n\n # TODO: try to get these passing down from the command?\n # without resorting to os.environ to hold these.\n\n if options.no_input:\n os.environ['PIP_NO_INPUT'] = '1'\n\n if options.exists_action:\n os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)\n\n if options.require_venv:\n # If a venv is required check if it can really be found\n if not running_under_virtualenv():\n logger.critical(\n 'Could not find an activated virtualenv (required).'\n )\n sys.exit(VIRTUALENV_NOT_FOUND)\n\n try:\n status = self.run(options, args)\n # FIXME: all commands should return an exit status\n # and when it is done, isinstance is not needed anymore\n if isinstance(status, int):\n return status\n except PreviousBuildDirError as exc:\n logger.critical(str(exc))\n logger.debug('Exception information:', exc_info=True)\n\n return PREVIOUS_BUILD_DIR_ERROR\n except (InstallationError, UninstallationError, BadCommand) as exc:\n logger.critical(str(exc))\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except CommandError as exc:\n logger.critical('ERROR: %s', exc)\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except KeyboardInterrupt:\n logger.critical('Operation cancelled by user')\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except:\n logger.critical('Exception:', exc_info=True)\n\n return UNKNOWN_ERROR\n finally:\n # Check if we're using the latest version of pip available\n if (not options.disable_pip_version_check and not\n getattr(options, \"no_index\", False)):\n with self._build_session(\n options,\n retries=0,\n timeout=min(5, options.timeout)) as session:\n pip_version_check(session)\n\n return SUCCESS\n\n\nclass RequirementCommand(Command):\n\n @staticmethod\n def populate_requirement_set(requirement_set, args, options, finder,\n session, name, wheel_cache):\n \"\"\"\n Marshal cmd line args into a 
requirement set.\n \"\"\"\n for filename in options.constraints:\n for req in parse_requirements(\n filename,\n constraint=True, finder=finder, options=options,\n session=session, wheel_cache=wheel_cache):\n requirement_set.add_requirement(req)\n\n for req in args:\n requirement_set.add_requirement(\n InstallRequirement.from_line(\n req, None, isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n )\n\n for req in options.editables:\n requirement_set.add_requirement(\n InstallRequirement.from_editable(\n req,\n default_vcs=options.default_vcs,\n isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n )\n\n found_req_in_file = False\n for filename in options.requirements:\n for req in parse_requirements(\n filename,\n finder=finder, options=options, session=session,\n wheel_cache=wheel_cache):\n found_req_in_file = True\n requirement_set.add_requirement(req)\n # If --require-hashes was a line in a requirements file, tell\n # RequirementSet about it:\n requirement_set.require_hashes = options.require_hashes\n\n if not (args or options.editables or found_req_in_file):\n opts = {'name': name}\n if options.find_links:\n msg = ('You must give at least one requirement to '\n '%(name)s (maybe you meant \"pip %(name)s '\n '%(links)s\"?)' %\n dict(opts, links=' '.join(options.find_links)))\n else:\n msg = ('You must give at least one requirement '\n 'to %(name)s (see \"pip help %(name)s\")' % opts)\n logger.warning(msg)\n\n def _build_package_finder(self, options, session):\n \"\"\"\n Create a package finder appropriate to this requirement command.\n \"\"\"\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n return PackageFinder(\n find_links=options.find_links,\n format_control=options.format_control,\n index_urls=index_urls,\n trusted_hosts=options.trusted_hosts,\n allow_all_prereleases=options.pre,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n", "path": "pip/basecommand.py"}], "after_files": [{"content": "\"\"\"\nA module that implments tooling to enable easy warnings about deprecations.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\n\nclass PipDeprecationWarning(Warning):\n pass\n\n\nclass RemovedInPip9Warning(PipDeprecationWarning, DeprecationWarning):\n pass\n\n\nclass RemovedInPip10Warning(PipDeprecationWarning, PendingDeprecationWarning):\n pass\n\n\nclass Python26DeprecationWarning(\n PipDeprecationWarning, PendingDeprecationWarning\n):\n pass\n\n\nDEPRECATIONS = [\n RemovedInPip9Warning, RemovedInPip10Warning, Python26DeprecationWarning\n]\n\n\n# Warnings <-> Logging Integration\n\n\n_warnings_showwarning = None\n\n\ndef _showwarning(message, category, filename, lineno, file=None, line=None):\n if file is not None:\n if _warnings_showwarning is not None:\n _warnings_showwarning(\n message, category, filename, lineno, file, line,\n )\n else:\n if issubclass(category, PipDeprecationWarning):\n # We use a specially named logger which will handle all of the\n # deprecation messages for pip.\n logger = logging.getLogger(\"pip.deprecations\")\n\n # This is purposely using the % formatter here instead of letting\n # the logging module handle the interpolation. This is because we\n # want it to appear as if someone typed this entire message out.\n log_message = \"DEPRECATION: %s\" % message\n\n # Things that are DeprecationWarnings will be removed in the very\n # next version of pip. 
We want these to be more obvious so we\n # use the ERROR logging level while the PendingDeprecationWarnings\n # are still have at least 2 versions to go until they are removed\n # so they can just be warnings.\n if issubclass(category, DeprecationWarning):\n logger.error(log_message)\n else:\n logger.warning(log_message)\n else:\n _warnings_showwarning(\n message, category, filename, lineno, file, line,\n )\n\n\ndef install_warning_logger():\n global _warnings_showwarning\n\n if _warnings_showwarning is None:\n _warnings_showwarning = warnings.showwarning\n warnings.showwarning = _showwarning\n", "path": "pip/utils/deprecation.py"}, {"content": "\"\"\"Base Command class, and related routines\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nimport sys\nimport optparse\nimport warnings\n\nfrom pip import cmdoptions\nfrom pip.index import PackageFinder\nfrom pip.locations import running_under_virtualenv\nfrom pip.download import PipSession\nfrom pip.exceptions import (BadCommand, InstallationError, UninstallationError,\n CommandError, PreviousBuildDirError)\n\nfrom pip.compat import logging_dictConfig\nfrom pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter\nfrom pip.req import InstallRequirement, parse_requirements\nfrom pip.status_codes import (\n SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,\n PREVIOUS_BUILD_DIR_ERROR,\n)\nfrom pip.utils import deprecation, get_prog, normalize_path\nfrom pip.utils.logging import IndentingFormatter\nfrom pip.utils.outdated import pip_version_check\n\n\n__all__ = ['Command']\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(object):\n name = None\n usage = None\n hidden = False\n log_streams = (\"ext://sys.stdout\", \"ext://sys.stderr\")\n\n def __init__(self, isolated=False):\n parser_kw = {\n 'usage': self.usage,\n 'prog': '%s %s' % (get_prog(), self.name),\n 'formatter': UpdatingDefaultsHelpFormatter(),\n 'add_help_option': False,\n 'name': self.name,\n 'description': self.__doc__,\n 'isolated': isolated,\n }\n\n self.parser = ConfigOptionParser(**parser_kw)\n\n # Commands should add options to this option group\n optgroup_name = '%s Options' % self.name.capitalize()\n self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)\n\n # Add the general options\n gen_opts = cmdoptions.make_option_group(\n cmdoptions.general_group,\n self.parser,\n )\n self.parser.add_option_group(gen_opts)\n\n def _build_session(self, options, retries=None, timeout=None):\n session = PipSession(\n cache=(\n normalize_path(os.path.join(options.cache_dir, \"http\"))\n if options.cache_dir else None\n ),\n retries=retries if retries is not None else options.retries,\n insecure_hosts=options.trusted_hosts,\n )\n\n # Handle custom ca-bundles from the user\n if options.cert:\n session.verify = options.cert\n\n # Handle SSL client certificate\n if options.client_cert:\n session.cert = options.client_cert\n\n # Handle timeouts\n if options.timeout or timeout:\n session.timeout = (\n timeout if timeout is not None else options.timeout\n )\n\n # Handle configured proxies\n if options.proxy:\n session.proxies = {\n \"http\": options.proxy,\n \"https\": options.proxy,\n }\n\n # Determine if we can prompt the user for authentication or not\n session.auth.prompting = not options.no_input\n\n return session\n\n def parse_args(self, args):\n # factored out for testability\n return self.parser.parse_args(args)\n\n def main(self, args):\n options, args = self.parse_args(args)\n\n if options.quiet:\n if options.quiet == 1:\n 
level = \"WARNING\"\n if options.quiet == 2:\n level = \"ERROR\"\n else:\n level = \"CRITICAL\"\n elif options.verbose:\n level = \"DEBUG\"\n else:\n level = \"INFO\"\n\n logging_dictConfig({\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n \"exclude_warnings\": {\n \"()\": \"pip.utils.logging.MaxLevelFilter\",\n \"level\": logging.WARNING,\n },\n },\n \"formatters\": {\n \"indent\": {\n \"()\": IndentingFormatter,\n \"format\": \"%(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": level,\n \"class\": \"pip.utils.logging.ColorizedStreamHandler\",\n \"stream\": self.log_streams[0],\n \"filters\": [\"exclude_warnings\"],\n \"formatter\": \"indent\",\n },\n \"console_errors\": {\n \"level\": \"WARNING\",\n \"class\": \"pip.utils.logging.ColorizedStreamHandler\",\n \"stream\": self.log_streams[1],\n \"formatter\": \"indent\",\n },\n \"user_log\": {\n \"level\": \"DEBUG\",\n \"class\": \"pip.utils.logging.BetterRotatingFileHandler\",\n \"filename\": options.log or \"/dev/null\",\n \"delay\": True,\n \"formatter\": \"indent\",\n },\n },\n \"root\": {\n \"level\": level,\n \"handlers\": list(filter(None, [\n \"console\",\n \"console_errors\",\n \"user_log\" if options.log else None,\n ])),\n },\n # Disable any logging besides WARNING unless we have DEBUG level\n # logging enabled. These use both pip._vendor and the bare names\n # for the case where someone unbundles our libraries.\n \"loggers\": dict(\n (\n name,\n {\n \"level\": (\n \"WARNING\"\n if level in [\"INFO\", \"ERROR\"]\n else \"DEBUG\"\n ),\n },\n )\n for name in [\"pip._vendor\", \"distlib\", \"requests\", \"urllib3\"]\n ),\n })\n\n if sys.version_info[:2] == (2, 6):\n warnings.warn(\n \"Python 2.6 is no longer supported by the Python core team, \"\n \"please upgrade your Python. 
A future version of pip will \"\n \"drop support for Python 2.6\",\n deprecation.Python26DeprecationWarning\n )\n\n # TODO: try to get these passing down from the command?\n # without resorting to os.environ to hold these.\n\n if options.no_input:\n os.environ['PIP_NO_INPUT'] = '1'\n\n if options.exists_action:\n os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)\n\n if options.require_venv:\n # If a venv is required check if it can really be found\n if not running_under_virtualenv():\n logger.critical(\n 'Could not find an activated virtualenv (required).'\n )\n sys.exit(VIRTUALENV_NOT_FOUND)\n\n try:\n status = self.run(options, args)\n # FIXME: all commands should return an exit status\n # and when it is done, isinstance is not needed anymore\n if isinstance(status, int):\n return status\n except PreviousBuildDirError as exc:\n logger.critical(str(exc))\n logger.debug('Exception information:', exc_info=True)\n\n return PREVIOUS_BUILD_DIR_ERROR\n except (InstallationError, UninstallationError, BadCommand) as exc:\n logger.critical(str(exc))\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except CommandError as exc:\n logger.critical('ERROR: %s', exc)\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except KeyboardInterrupt:\n logger.critical('Operation cancelled by user')\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except:\n logger.critical('Exception:', exc_info=True)\n\n return UNKNOWN_ERROR\n finally:\n # Check if we're using the latest version of pip available\n if (not options.disable_pip_version_check and not\n getattr(options, \"no_index\", False)):\n with self._build_session(\n options,\n retries=0,\n timeout=min(5, options.timeout)) as session:\n pip_version_check(session)\n\n return SUCCESS\n\n\nclass RequirementCommand(Command):\n\n @staticmethod\n def populate_requirement_set(requirement_set, args, options, finder,\n session, name, wheel_cache):\n \"\"\"\n Marshal cmd line args into a requirement set.\n \"\"\"\n for filename in options.constraints:\n for req in parse_requirements(\n filename,\n constraint=True, finder=finder, options=options,\n session=session, wheel_cache=wheel_cache):\n requirement_set.add_requirement(req)\n\n for req in args:\n requirement_set.add_requirement(\n InstallRequirement.from_line(\n req, None, isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n )\n\n for req in options.editables:\n requirement_set.add_requirement(\n InstallRequirement.from_editable(\n req,\n default_vcs=options.default_vcs,\n isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n )\n\n found_req_in_file = False\n for filename in options.requirements:\n for req in parse_requirements(\n filename,\n finder=finder, options=options, session=session,\n wheel_cache=wheel_cache):\n found_req_in_file = True\n requirement_set.add_requirement(req)\n # If --require-hashes was a line in a requirements file, tell\n # RequirementSet about it:\n requirement_set.require_hashes = options.require_hashes\n\n if not (args or options.editables or found_req_in_file):\n opts = {'name': name}\n if options.find_links:\n msg = ('You must give at least one requirement to '\n '%(name)s (maybe you meant \"pip %(name)s '\n '%(links)s\"?)' %\n dict(opts, links=' '.join(options.find_links)))\n else:\n msg = ('You must give at least one requirement '\n 'to %(name)s (see \"pip help %(name)s\")' % opts)\n logger.warning(msg)\n\n def _build_package_finder(self, options, session):\n \"\"\"\n Create 
a package finder appropriate to this requirement command.\n \"\"\"\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n return PackageFinder(\n find_links=options.find_links,\n format_control=options.format_control,\n index_urls=index_urls,\n trusted_hosts=options.trusted_hosts,\n allow_all_prereleases=options.pre,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n", "path": "pip/basecommand.py"}]}
| 3,891 | 427 |
gh_patches_debug_38774
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-1281
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
datacube dataset index incorrect comparison of geometry
linked issue: https://github.com/opendatacube/odc-tools/issues/479
### Expected behaviour
when performing `--update-if-exists`, `geometry.coordinates` and `grid_spatial.projection.valid_data.coordinates` shouldn't be marked as `unsafe` changes when the values are identical but one side is a tuple rather than a list
### Actual behaviour
```
root@546115d5f0ab:/# s3-to-dc --stac --update-if-exists ss3://sentinel-cogs/sentinel-s2-l2a-cogs/4/T/UM/2022/1/S2A_42TUM_20220102_0_L2A/*.json s2_l2a
Usage: s3-to-dc [OPTIONS] URI PRODUCT
Try 's3-to-dc --help' for help.
Error: No such option: --update-if-exist (Possible options: --update, --update-if-exists)
root@546115d5f0ab:/# s3-to-dc --stac --update-if-exists s3://sentinel-cogs/sentinel-s2-l2a-cogs/42/T/UM/2022/1/S2A_42TUM_20220102_0_L2A/*.json s2_l2a
Unsafe change in geometry.coordinates from [[[300001.0, 4590241.0], [300001.0, 4700039.0], [406734.0, 4700039.0], [383003.0, 4620099.0], [373985.0, 4590241.0], [300001.0, 4590241.0]]] to (((300001.0, 4590241.0), (300001.0, 4700039.0), (406734.0, 4700039.0), (383003.0, 4620099.0), (373985.0, 4590241.0), (300001.0, 4590241.0)),)
Unsafe change in grid_spatial.projection.valid_data.coordinates from [[[300001.0, 4590241.0], [300001.0, 4700039.0], [406734.0, 4700039.0], [383003.0, 4620099.0], [373985.0, 4590241.0], [300001.0, 4590241.0]]] to (((300001.0, 4590241.0), (300001.0, 4700039.0), (406734.0, 4700039.0), (383003.0, 4620099.0), (373985.0, 4590241.0), (300001.0, 4590241.0)),)
ERROR:root:Failed to index dataset s3://sentinel-cogs/sentinel-s2-l2a-cogs/42/T/UM/2022/1/S2A_42TUM_20220102_0_L2A/S2A_42TUM_20220102_0_L2A.json with error Updating the dataset raised an exception: Unsafe changes in 22250846-0ba8-5ee5-8dbe-38c2adc1f4ab: geometry.coordinates, grid_spatial.projection.valid_data.coordinates
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/odc/apps/dc_tools/utils.py", line 163, in index_update_dataset
dc.index.datasets.update(ds, updates_allowed=updates)
File "/usr/local/lib/python3.8/dist-packages/datacube/index/_datasets.py", line 302, in update
raise ValueError(f"Unsafe changes in {dataset.id}: " + (
ValueError: Unsafe changes in 22250846-0ba8-5ee5-8dbe-38c2adc1f4ab: geometry.coordinates, grid_spatial.projection.valid_data.coordinates
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/odc/apps/dc_tools/s3_to_dc.py", line 58, in dump_to_odc
index_update_dataset(metadata, uri, dc, doc2ds, update, update_if_exists, allow_unsafe)
File "/usr/local/lib/python3.8/dist-packages/odc/apps/dc_tools/utils.py", line 165, in index_update_dataset
raise IndexingException(
odc.apps.dc_tools.utils.IndexingException: Updating the dataset raised an exception: Unsafe changes in 22250846-0ba8-5ee5-8dbe-38c2adc1f4ab: geometry.coordinates, grid_spatial.projection.valid_data.coordinates
Added 0 datasets and failed 1 datasets.
```
### Steps to reproduce the behaviour
1. add a product https://explorer.digitalearth.africa/products/s2_l2a.odc-product.yaml
2. bring up datacube-index to run `s3-to-dc --stac`, OR
3. create a yaml dataset with the same stac json content
4. index first
5. then run `--update-if-exists`
### Environment information
* Which ``datacube --version`` are you using?
```
root@546115d5f0ab:/# datacube --version
Open Data Cube core, version 1.8.6
```
* What datacube deployment/environment are you running against?
- datacube-index docker container
> **Note:** Stale issues will be automatically closed after a period of six months with no activity.
> To ensure critical issues are not closed, tag them with the Github `pinned` tag.
> If you are a community member and not a maintainer please escalate this issue to maintainers via
> [GIS StackExchange](https://gis.stackexchange.com/questions/tagged/open-data-cube) or [Slack](http://slack.opendatacube.org).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/utils/changes.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """
6 Validation of document/dictionary changes.
7 """
8 from itertools import zip_longest
9 from typing import Any, Callable, List, Mapping, Sequence, Tuple, Union
10
11 # Type that can be checked for changes.
12 # (MyPy approximation without recursive references)
13 Changable = Union[str, int, None, Sequence[Any], Mapping[str, Any]]
14 # More accurate recursive definition:
15 # Changable = Union[str, int, None, Sequence["Changable"], Mapping[str, "Changable"]]
16
17 def contains(v1: Changable, v2: Changable, case_sensitive: bool = False) -> bool:
18 """
19 Check that v1 is a superset of v2.
20
21 For dicts contains(v1[k], v2[k]) for all k in v2
22 For other types v1 == v2
23 v2 None is interpreted as {}
24
25 """
26 if not case_sensitive:
27 if isinstance(v1, str):
28 return isinstance(v2, str) and v1.lower() == v2.lower()
29
30 if isinstance(v1, dict):
31 return v2 is None or (isinstance(v2, dict) and
32 all(contains(v1.get(k, object()), v, case_sensitive=case_sensitive)
33 for k, v in v2.items()))
34
35 return v1 == v2
36
37
38 class MissingSentinel:
39 def __str__(self):
40 return "missing"
41
42 def __repr__(self):
43 return "missing"
44
45
46 MISSING = MissingSentinel()
47
48 # Representation of an offset in a dict structure
49 OffsetElem = Union[str, int]
50 Offset = Tuple[OffsetElem, ...]
51
52 # Representation of a changed value
53 ChangedValue = Union[MissingSentinel, Changable]
54
55 # Representation of a change
56 Change = Tuple[Offset, ChangedValue, ChangedValue]
57
58
59 def get_doc_changes(original: Changable,
60 new: Changable,
61 base_prefix: Offset = ()
62 ) -> List[Change]:
63 """
64 Return a list of `changed fields` between two dict structures.
65
66 A `changed field` is represented by a 3-tuple made up of:
67
68 1. `offset` to the change - a tuple of `item` accessors on the document.
69 2. What is in `original` - Either a single value, a dict or list, or :data:`MISSING`.
70 3. What is in `new`
71
72 If the documents are identical, an empty list is returned.
73
74 :type original: Union[dict, list, int]
75 :rtype: list[(tuple, object, object)]
76
77
78 """
79 changed_fields: List[Change] = []
80 if original == new:
81 return changed_fields
82
83 if isinstance(original, dict) and isinstance(new, dict):
84 all_keys = set(original.keys()).union(new.keys())
85 for key in all_keys:
86 changed_fields.extend(get_doc_changes(original.get(key, MISSING),
87 new.get(key, MISSING),
88 base_prefix + (key,)))
89 elif isinstance(original, list) and isinstance(new, list):
90 for idx, (orig_item, new_item) in enumerate(zip_longest(original, new)):
91 changed_fields.extend(get_doc_changes(orig_item, new_item, base_prefix + (idx, )))
92 else:
93 changed_fields.append((base_prefix, original, new))
94
95 return sorted(changed_fields, key=lambda a: a[0])
96
97
98 class DocumentMismatchError(Exception):
99 pass
100
101
102 def check_doc_unchanged(original: Changable, new: Changable, doc_name: str) -> None:
103 """
104 Raise an error if any fields have been modified on a document.
105
106 :param original: original document
107 :param new: new document to compare against the original
108 :param doc_name: Label used to name the document
109 """
110 changes = get_doc_changes(original, new)
111
112 if changes:
113 raise DocumentMismatchError(
114 '{} differs from stored ({})'.format(
115 doc_name,
116 ', '.join(['{}: {!r}!={!r}'.format('.'.join(map(str, offset)), v1, v2) for offset, v1, v2 in changes])
117 )
118 )
119
120
121 AllowPolicy = Callable[[Offset, Offset, ChangedValue, ChangedValue], bool]
122
123 def allow_truncation(key: Offset, offset: Offset,
124 old_value: ChangedValue, new_value: ChangedValue) -> bool:
125 return bool(offset) and key == offset[:-1] and new_value == MISSING
126
127
128 def allow_extension(key: Offset, offset: Offset,
129 old_value: ChangedValue, new_value: ChangedValue) -> bool:
130 return bool(offset) and key == offset[:-1] and old_value == MISSING
131
132
133 def allow_addition(key: Offset, offset: Offset,
134 old_value: ChangedValue, new_value: ChangedValue) -> bool:
135 return key == offset and old_value == MISSING
136
137
138 def allow_removal(key: Offset, offset: Offset,
139 old_value: ChangedValue, new_value: ChangedValue) -> bool:
140 return key == offset and new_value == MISSING
141
142
143 def allow_any(key: Offset, offset: Offset,
144 old: ChangedValue, new: ChangedValue) -> bool:
145 return True
146
147
148 def classify_changes(changes: List[Change], allowed_changes: Mapping[Offset, AllowPolicy]
149 ) -> Tuple[List[Change], List[Change]]:
150 """
151 Classify list of changes into good(allowed) and bad(not allowed) based on allowed changes.
152
153 :param list[(tuple,object,object)] changes: result of get_doc_changes
154 :param allowed_changes: mapping from key to change policy (subset, superset, any)
155 :return: good_changes, bad_chages
156 """
157 allowed_changes_index = dict(allowed_changes)
158
159 good_changes: List[Change] = []
160 bad_changes: List[Change] = []
161
162 for offset, old_val, new_val in changes:
163 allowance = allowed_changes_index.get(offset)
164 allowance_offset = offset
165 # If no allowance on this leaf, find if any parents have allowances.
166 while allowance is None:
167 if not allowance_offset:
168 break
169
170 allowance_offset = allowance_offset[:-1]
171 allowance = allowed_changes_index.get(allowance_offset)
172
173 if allowance is None:
174 bad_changes.append((offset, old_val, new_val))
175 elif hasattr(allowance, '__call__'):
176 if allowance(allowance_offset, offset, old_val, new_val):
177 good_changes.append((offset, old_val, new_val))
178 else:
179 bad_changes.append((offset, old_val, new_val))
180 else:
181 raise RuntimeError('Unknown change type: expecting validation function at %r' % offset)
182
183 return good_changes, bad_changes
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datacube/utils/changes.py b/datacube/utils/changes.py
--- a/datacube/utils/changes.py
+++ b/datacube/utils/changes.py
@@ -5,6 +5,8 @@
"""
Validation of document/dictionary changes.
"""
+import numpy
+
from itertools import zip_longest
from typing import Any, Callable, List, Mapping, Sequence, Tuple, Union
@@ -14,6 +16,7 @@
# More accurate recursive definition:
# Changable = Union[str, int, None, Sequence["Changable"], Mapping[str, "Changable"]]
+
def contains(v1: Changable, v2: Changable, case_sensitive: bool = False) -> bool:
"""
Check that v1 is a superset of v2.
@@ -59,7 +62,7 @@
def get_doc_changes(original: Changable,
new: Changable,
base_prefix: Offset = ()
- ) -> List[Change]:
+ ) -> List[Change]:
"""
Return a list of `changed fields` between two dict structures.
@@ -89,6 +92,9 @@
elif isinstance(original, list) and isinstance(new, list):
for idx, (orig_item, new_item) in enumerate(zip_longest(original, new)):
changed_fields.extend(get_doc_changes(orig_item, new_item, base_prefix + (idx, )))
+ elif isinstance(original, tuple) or isinstance(new, tuple):
+ if not numpy.array_equal(original, new):
+ changed_fields.append((base_prefix, original, new))
else:
changed_fields.append((base_prefix, original, new))
@@ -120,6 +126,7 @@
AllowPolicy = Callable[[Offset, Offset, ChangedValue, ChangedValue], bool]
+
def allow_truncation(key: Offset, offset: Offset,
old_value: ChangedValue, new_value: ChangedValue) -> bool:
return bool(offset) and key == offset[:-1] and new_value == MISSING
@@ -146,7 +153,7 @@
def classify_changes(changes: List[Change], allowed_changes: Mapping[Offset, AllowPolicy]
- ) -> Tuple[List[Change], List[Change]]:
+ ) -> Tuple[List[Change], List[Change]]:
"""
Classify list of changes into good(allowed) and bad(not allowed) based on allowed changes.
|
{"golden_diff": "diff --git a/datacube/utils/changes.py b/datacube/utils/changes.py\n--- a/datacube/utils/changes.py\n+++ b/datacube/utils/changes.py\n@@ -5,6 +5,8 @@\n \"\"\"\n Validation of document/dictionary changes.\n \"\"\"\n+import numpy\n+\n from itertools import zip_longest\n from typing import Any, Callable, List, Mapping, Sequence, Tuple, Union\n \n@@ -14,6 +16,7 @@\n # More accurate recursive definition:\n # Changable = Union[str, int, None, Sequence[\"Changable\"], Mapping[str, \"Changable\"]]\n \n+\n def contains(v1: Changable, v2: Changable, case_sensitive: bool = False) -> bool:\n \"\"\"\n Check that v1 is a superset of v2.\n@@ -59,7 +62,7 @@\n def get_doc_changes(original: Changable,\n new: Changable,\n base_prefix: Offset = ()\n- ) -> List[Change]:\n+ ) -> List[Change]:\n \"\"\"\n Return a list of `changed fields` between two dict structures.\n \n@@ -89,6 +92,9 @@\n elif isinstance(original, list) and isinstance(new, list):\n for idx, (orig_item, new_item) in enumerate(zip_longest(original, new)):\n changed_fields.extend(get_doc_changes(orig_item, new_item, base_prefix + (idx, )))\n+ elif isinstance(original, tuple) or isinstance(new, tuple):\n+ if not numpy.array_equal(original, new):\n+ changed_fields.append((base_prefix, original, new))\n else:\n changed_fields.append((base_prefix, original, new))\n \n@@ -120,6 +126,7 @@\n \n AllowPolicy = Callable[[Offset, Offset, ChangedValue, ChangedValue], bool]\n \n+\n def allow_truncation(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return bool(offset) and key == offset[:-1] and new_value == MISSING\n@@ -146,7 +153,7 @@\n \n \n def classify_changes(changes: List[Change], allowed_changes: Mapping[Offset, AllowPolicy]\n- ) -> Tuple[List[Change], List[Change]]:\n+ ) -> Tuple[List[Change], List[Change]]:\n \"\"\"\n Classify list of changes into good(allowed) and bad(not allowed) based on allowed changes.\n", "issue": "datacube dataset index incorrect comparison of geometry\nlinked issue: https://github.com/opendatacube/odc-tools/issues/479 \r\n\r\n### Expected behaviour\r\nwhen performing `--update-if-exists` `geometry.coordinates` and `grid_spatial.projection.valid_data.coordinates` shouldn't mark `unsafe` when the values are identical but the type is a tuple\r\n\r\n### Actual behaviour\r\n\r\n```\r\nroot@546115d5f0ab:/# s3-to-dc --stac --update-if-exists ss3://sentinel-cogs/sentinel-s2-l2a-cogs/4/T/UM/2022/1/S2A_42TUM_20220102_0_L2A/*.json s2_l2a\r\nUsage: s3-to-dc [OPTIONS] URI PRODUCT\r\nTry 's3-to-dc --help' for help.\r\n\r\nError: No such option: --update-if-exist (Possible options: --update, --update-if-exists)\r\nroot@546115d5f0ab:/# s3-to-dc --stac --update-if-exists s3://sentinel-cogs/sentinel-s2-l2a-cogs/42/T/UM/2022/1/S2A_42TUM_20220102_0_L2A/*.json s2_l2a\r\nUnsafe change in geometry.coordinates from [[[300001.0, 4590241.0], [300001.0, 4700039.0], [406734.0, 4700039.0], [383003.0, 4620099.0], [373985.0, 4590241.0], [300001.0, 4590241.0]]] to (((300001.0, 4590241.0), (300001.0, 4700039.0), (406734.0, 4700039.0), (383003.0, 4620099.0), (373985.0, 4590241.0), (300001.0, 4590241.0)),)\r\nUnsafe change in grid_spatial.projection.valid_data.coordinates from [[[300001.0, 4590241.0], [300001.0, 4700039.0], [406734.0, 4700039.0], [383003.0, 4620099.0], [373985.0, 4590241.0], [300001.0, 4590241.0]]] to (((300001.0, 4590241.0), (300001.0, 4700039.0), (406734.0, 4700039.0), (383003.0, 4620099.0), (373985.0, 4590241.0), (300001.0, 4590241.0)),)\r\nERROR:root:Failed to index dataset 
s3://sentinel-cogs/sentinel-s2-l2a-cogs/42/T/UM/2022/1/S2A_42TUM_20220102_0_L2A/S2A_42TUM_20220102_0_L2A.json with error Updating the dataset raised an exception: Unsafe changes in 22250846-0ba8-5ee5-8dbe-38c2adc1f4ab: geometry.coordinates, grid_spatial.projection.valid_data.coordinates\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/dist-packages/odc/apps/dc_tools/utils.py\", line 163, in index_update_dataset\r\n dc.index.datasets.update(ds, updates_allowed=updates)\r\n File \"/usr/local/lib/python3.8/dist-packages/datacube/index/_datasets.py\", line 302, in update\r\n raise ValueError(f\"Unsafe changes in {dataset.id}: \" + (\r\nValueError: Unsafe changes in 22250846-0ba8-5ee5-8dbe-38c2adc1f4ab: geometry.coordinates, grid_spatial.projection.valid_data.coordinates\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/dist-packages/odc/apps/dc_tools/s3_to_dc.py\", line 58, in dump_to_odc\r\n index_update_dataset(metadata, uri, dc, doc2ds, update, update_if_exists, allow_unsafe)\r\n File \"/usr/local/lib/python3.8/dist-packages/odc/apps/dc_tools/utils.py\", line 165, in index_update_dataset\r\n raise IndexingException(\r\nodc.apps.dc_tools.utils.IndexingException: Updating the dataset raised an exception: Unsafe changes in 22250846-0ba8-5ee5-8dbe-38c2adc1f4ab: geometry.coordinates, grid_spatial.projection.valid_data.coordinates\r\nAdded 0 datasets and failed 1 datasets.\r\n```\r\n\r\n### Steps to reproduce the behaviour\r\n\r\n1. add a product https://explorer.digitalearth.africa/products/s2_l2a.odc-product.yaml\r\n2. bring up datacube-index to run `s3-to-dc --stac`, OR\r\n3. create a yaml dataset with the same stac json content\r\n4. index first\r\n5. then run `--update-if-exists`\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n```\r\nroot@546115d5f0ab:/# datacube --version\r\nOpen Data Cube core, version 1.8.6\r\n```\r\n\r\n* What datacube deployment/enviornment are you running against?\r\n- datacube-index docker container\r\n\r\n> **Note:** Stale issues will be automatically closed after a period of six months with no activity. 
\r\n> To ensure critical issues are not closed, tag them with the Github `pinned` tag.\r\n> If you are a community member and not a maintainer please escalate this issue to maintainers via\r\n> [GIS StackExchange](https://gis.stackexchange.com/questions/tagged/open-data-cube) or [Slack](http://slack.opendatacube.org).\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nValidation of document/dictionary changes.\n\"\"\"\nfrom itertools import zip_longest\nfrom typing import Any, Callable, List, Mapping, Sequence, Tuple, Union\n\n# Type that can be checked for changes.\n# (MyPy approximation without recursive references)\nChangable = Union[str, int, None, Sequence[Any], Mapping[str, Any]]\n# More accurate recursive definition:\n# Changable = Union[str, int, None, Sequence[\"Changable\"], Mapping[str, \"Changable\"]]\n\ndef contains(v1: Changable, v2: Changable, case_sensitive: bool = False) -> bool:\n \"\"\"\n Check that v1 is a superset of v2.\n\n For dicts contains(v1[k], v2[k]) for all k in v2\n For other types v1 == v2\n v2 None is interpreted as {}\n\n \"\"\"\n if not case_sensitive:\n if isinstance(v1, str):\n return isinstance(v2, str) and v1.lower() == v2.lower()\n\n if isinstance(v1, dict):\n return v2 is None or (isinstance(v2, dict) and\n all(contains(v1.get(k, object()), v, case_sensitive=case_sensitive)\n for k, v in v2.items()))\n\n return v1 == v2\n\n\nclass MissingSentinel:\n def __str__(self):\n return \"missing\"\n\n def __repr__(self):\n return \"missing\"\n\n\nMISSING = MissingSentinel()\n\n# Representation of an offset in a dict structure\nOffsetElem = Union[str, int]\nOffset = Tuple[OffsetElem, ...]\n\n# Representation of a changed value\nChangedValue = Union[MissingSentinel, Changable]\n\n# Representation of a change\nChange = Tuple[Offset, ChangedValue, ChangedValue]\n\n\ndef get_doc_changes(original: Changable,\n new: Changable,\n base_prefix: Offset = ()\n ) -> List[Change]:\n \"\"\"\n Return a list of `changed fields` between two dict structures.\n\n A `changed field` is represented by a 3-tuple made up of:\n\n 1. `offset` to the change - a tuple of `item` accessors on the document.\n 2. What is in `original` - Either a single value, a dict or list, or :data:`MISSING`.\n 3. 
What is in `new`\n\n If the documents are identical, an empty list is returned.\n\n :type original: Union[dict, list, int]\n :rtype: list[(tuple, object, object)]\n\n\n \"\"\"\n changed_fields: List[Change] = []\n if original == new:\n return changed_fields\n\n if isinstance(original, dict) and isinstance(new, dict):\n all_keys = set(original.keys()).union(new.keys())\n for key in all_keys:\n changed_fields.extend(get_doc_changes(original.get(key, MISSING),\n new.get(key, MISSING),\n base_prefix + (key,)))\n elif isinstance(original, list) and isinstance(new, list):\n for idx, (orig_item, new_item) in enumerate(zip_longest(original, new)):\n changed_fields.extend(get_doc_changes(orig_item, new_item, base_prefix + (idx, )))\n else:\n changed_fields.append((base_prefix, original, new))\n\n return sorted(changed_fields, key=lambda a: a[0])\n\n\nclass DocumentMismatchError(Exception):\n pass\n\n\ndef check_doc_unchanged(original: Changable, new: Changable, doc_name: str) -> None:\n \"\"\"\n Raise an error if any fields have been modified on a document.\n\n :param original: original document\n :param new: new document to compare against the original\n :param doc_name: Label used to name the document\n \"\"\"\n changes = get_doc_changes(original, new)\n\n if changes:\n raise DocumentMismatchError(\n '{} differs from stored ({})'.format(\n doc_name,\n ', '.join(['{}: {!r}!={!r}'.format('.'.join(map(str, offset)), v1, v2) for offset, v1, v2 in changes])\n )\n )\n\n\nAllowPolicy = Callable[[Offset, Offset, ChangedValue, ChangedValue], bool]\n\ndef allow_truncation(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return bool(offset) and key == offset[:-1] and new_value == MISSING\n\n\ndef allow_extension(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return bool(offset) and key == offset[:-1] and old_value == MISSING\n\n\ndef allow_addition(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return key == offset and old_value == MISSING\n\n\ndef allow_removal(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return key == offset and new_value == MISSING\n\n\ndef allow_any(key: Offset, offset: Offset,\n old: ChangedValue, new: ChangedValue) -> bool:\n return True\n\n\ndef classify_changes(changes: List[Change], allowed_changes: Mapping[Offset, AllowPolicy]\n ) -> Tuple[List[Change], List[Change]]:\n \"\"\"\n Classify list of changes into good(allowed) and bad(not allowed) based on allowed changes.\n\n :param list[(tuple,object,object)] changes: result of get_doc_changes\n :param allowed_changes: mapping from key to change policy (subset, superset, any)\n :return: good_changes, bad_chages\n \"\"\"\n allowed_changes_index = dict(allowed_changes)\n\n good_changes: List[Change] = []\n bad_changes: List[Change] = []\n\n for offset, old_val, new_val in changes:\n allowance = allowed_changes_index.get(offset)\n allowance_offset = offset\n # If no allowance on this leaf, find if any parents have allowances.\n while allowance is None:\n if not allowance_offset:\n break\n\n allowance_offset = allowance_offset[:-1]\n allowance = allowed_changes_index.get(allowance_offset)\n\n if allowance is None:\n bad_changes.append((offset, old_val, new_val))\n elif hasattr(allowance, '__call__'):\n if allowance(allowance_offset, offset, old_val, new_val):\n good_changes.append((offset, old_val, new_val))\n else:\n bad_changes.append((offset, old_val, 
new_val))\n else:\n raise RuntimeError('Unknown change type: expecting validation function at %r' % offset)\n\n return good_changes, bad_changes\n", "path": "datacube/utils/changes.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nValidation of document/dictionary changes.\n\"\"\"\nimport numpy\n\nfrom itertools import zip_longest\nfrom typing import Any, Callable, List, Mapping, Sequence, Tuple, Union\n\n# Type that can be checked for changes.\n# (MyPy approximation without recursive references)\nChangable = Union[str, int, None, Sequence[Any], Mapping[str, Any]]\n# More accurate recursive definition:\n# Changable = Union[str, int, None, Sequence[\"Changable\"], Mapping[str, \"Changable\"]]\n\n\ndef contains(v1: Changable, v2: Changable, case_sensitive: bool = False) -> bool:\n \"\"\"\n Check that v1 is a superset of v2.\n\n For dicts contains(v1[k], v2[k]) for all k in v2\n For other types v1 == v2\n v2 None is interpreted as {}\n\n \"\"\"\n if not case_sensitive:\n if isinstance(v1, str):\n return isinstance(v2, str) and v1.lower() == v2.lower()\n\n if isinstance(v1, dict):\n return v2 is None or (isinstance(v2, dict) and\n all(contains(v1.get(k, object()), v, case_sensitive=case_sensitive)\n for k, v in v2.items()))\n\n return v1 == v2\n\n\nclass MissingSentinel:\n def __str__(self):\n return \"missing\"\n\n def __repr__(self):\n return \"missing\"\n\n\nMISSING = MissingSentinel()\n\n# Representation of an offset in a dict structure\nOffsetElem = Union[str, int]\nOffset = Tuple[OffsetElem, ...]\n\n# Representation of a changed value\nChangedValue = Union[MissingSentinel, Changable]\n\n# Representation of a change\nChange = Tuple[Offset, ChangedValue, ChangedValue]\n\n\ndef get_doc_changes(original: Changable,\n new: Changable,\n base_prefix: Offset = ()\n ) -> List[Change]:\n \"\"\"\n Return a list of `changed fields` between two dict structures.\n\n A `changed field` is represented by a 3-tuple made up of:\n\n 1. `offset` to the change - a tuple of `item` accessors on the document.\n 2. What is in `original` - Either a single value, a dict or list, or :data:`MISSING`.\n 3. 
What is in `new`\n\n If the documents are identical, an empty list is returned.\n\n :type original: Union[dict, list, int]\n :rtype: list[(tuple, object, object)]\n\n\n \"\"\"\n changed_fields: List[Change] = []\n if original == new:\n return changed_fields\n\n if isinstance(original, dict) and isinstance(new, dict):\n all_keys = set(original.keys()).union(new.keys())\n for key in all_keys:\n changed_fields.extend(get_doc_changes(original.get(key, MISSING),\n new.get(key, MISSING),\n base_prefix + (key,)))\n elif isinstance(original, list) and isinstance(new, list):\n for idx, (orig_item, new_item) in enumerate(zip_longest(original, new)):\n changed_fields.extend(get_doc_changes(orig_item, new_item, base_prefix + (idx, )))\n elif isinstance(original, tuple) or isinstance(new, tuple):\n if not numpy.array_equal(original, new):\n changed_fields.append((base_prefix, original, new))\n else:\n changed_fields.append((base_prefix, original, new))\n\n return sorted(changed_fields, key=lambda a: a[0])\n\n\nclass DocumentMismatchError(Exception):\n pass\n\n\ndef check_doc_unchanged(original: Changable, new: Changable, doc_name: str) -> None:\n \"\"\"\n Raise an error if any fields have been modified on a document.\n\n :param original: original document\n :param new: new document to compare against the original\n :param doc_name: Label used to name the document\n \"\"\"\n changes = get_doc_changes(original, new)\n\n if changes:\n raise DocumentMismatchError(\n '{} differs from stored ({})'.format(\n doc_name,\n ', '.join(['{}: {!r}!={!r}'.format('.'.join(map(str, offset)), v1, v2) for offset, v1, v2 in changes])\n )\n )\n\n\nAllowPolicy = Callable[[Offset, Offset, ChangedValue, ChangedValue], bool]\n\n\ndef allow_truncation(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return bool(offset) and key == offset[:-1] and new_value == MISSING\n\n\ndef allow_extension(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return bool(offset) and key == offset[:-1] and old_value == MISSING\n\n\ndef allow_addition(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return key == offset and old_value == MISSING\n\n\ndef allow_removal(key: Offset, offset: Offset,\n old_value: ChangedValue, new_value: ChangedValue) -> bool:\n return key == offset and new_value == MISSING\n\n\ndef allow_any(key: Offset, offset: Offset,\n old: ChangedValue, new: ChangedValue) -> bool:\n return True\n\n\ndef classify_changes(changes: List[Change], allowed_changes: Mapping[Offset, AllowPolicy]\n ) -> Tuple[List[Change], List[Change]]:\n \"\"\"\n Classify list of changes into good(allowed) and bad(not allowed) based on allowed changes.\n\n :param list[(tuple,object,object)] changes: result of get_doc_changes\n :param allowed_changes: mapping from key to change policy (subset, superset, any)\n :return: good_changes, bad_chages\n \"\"\"\n allowed_changes_index = dict(allowed_changes)\n\n good_changes: List[Change] = []\n bad_changes: List[Change] = []\n\n for offset, old_val, new_val in changes:\n allowance = allowed_changes_index.get(offset)\n allowance_offset = offset\n # If no allowance on this leaf, find if any parents have allowances.\n while allowance is None:\n if not allowance_offset:\n break\n\n allowance_offset = allowance_offset[:-1]\n allowance = allowed_changes_index.get(allowance_offset)\n\n if allowance is None:\n bad_changes.append((offset, old_val, new_val))\n elif hasattr(allowance, '__call__'):\n 
if allowance(allowance_offset, offset, old_val, new_val):\n good_changes.append((offset, old_val, new_val))\n else:\n bad_changes.append((offset, old_val, new_val))\n else:\n raise RuntimeError('Unknown change type: expecting validation function at %r' % offset)\n\n return good_changes, bad_changes\n", "path": "datacube/utils/changes.py"}]}
| 3,771 | 519 |
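
A note on the record above: its patch compares list- and tuple-shaped coordinate structures by value instead of by type. A minimal standalone sketch of the `numpy.array_equal` behaviour the fix relies on, assuming only that `numpy` is installed (this is an illustration, not the project's code):

```python
# A nested list and an equivalent nested tuple compare equal element-wise,
# so identical coordinates are no longer reported as an "unsafe change".
import numpy

original = [[[300001.0, 4590241.0], [300001.0, 4700039.0], [406734.0, 4700039.0]]]
new = (((300001.0, 4590241.0), (300001.0, 4700039.0), (406734.0, 4700039.0)),)

print(original == new)                   # False: plain equality is type-sensitive
print(numpy.array_equal(original, new))  # True: shapes and values match
```
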
gh_patches_debug_12778
|
rasdani/github-patches
|
git_diff
|
apache__airflow-37402
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Airflow Helm chart fails to deploy with ArgoCD
### Official Helm Chart version
1.12.0 (latest released)
### Apache Airflow version
latest
### Kubernetes Version
1.29
### Helm Chart configuration
No parameters with the simple ArgoCD deployment (5.51.x).
### Docker Image customizations
no
### What happened
ArgoCD fails to deploy with `Unable to create application: application spec for airflow-test is invalid: InvalidSpecError: Unable to generate manifests in : rpc error: code = Unknown desc = illegal filepath in archive: /tmp/d629b287-bcfd-4ca1-b827-33aa871dae80`
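
One quick way to confirm where that error comes from is to list the members of the packaged chart and look for a bare `.` entry, which a strict path check in the extractor can reject. The tarball name below is an assumption; point it at wherever the chart archive was downloaded:

```python
# Hypothetical check: flag a bare "." member in a downloaded chart tarball.
import tarfile

with tarfile.open("airflow-1.12.0.tgz", "r:gz") as chart:
    for member in chart.getmembers():
        marker = "  <-- problematic entry" if member.name in (".", "./") else ""
        print(member.name + marker)
```
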
### What you think should happen instead
ArgoCD should be able to create the application
### How to reproduce
1. Deploy ArgoCD
2. Create a simple ArgoCD application from the UI
```
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: 'airflow-test'
spec:
project: default
source:
repoURL: 'https://airflow.apache.org'
targetRevision: 1.12.0
helm:
releaseName: airflow-test
version: v3
chart: airflow
destination:
server: 'https://kubernetes.default.svc'
namespace: airflow-test
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
```
### Anything else
Related issue in ArgoCD: https://github.com/argoproj/argo-cd/issues/17182
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [x] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev/breeze/src/airflow_breeze/utils/reproducible.py`
Content:
```
1 #!/usr/bin/env python3
2
3
4 # Licensed to the Apache Software Foundation (ASF) under one
5 # or more contributor license agreements. See the NOTICE file
6 # distributed with this work for additional information
7 # regarding copyright ownership. The ASF licenses this file
8 # to you under the Apache License, Version 2.0 (the
9 # "License"); you may not use this file except in compliance
10 # with the License. You may obtain a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing,
15 # software distributed under the License is distributed on an
16 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
17 # KIND, either express or implied. See the License for the
18 # specific language governing permissions and limitations
19 # under the License.
20
21 # Copyright 2013 The Servo Project Developers.
22 # Copyright 2017 zerolib Developers.
23 #
24 # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
25 # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
26 # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
27 # option. This file may not be copied, modified, or distributed
28 # except according to those terms.
29
30 # This command is a largely vendored-in script from
31 # https://github.com/MuxZeroNet/reproducible/blob/master/reproducible.py
32 from __future__ import annotations
33
34 import contextlib
35 import gzip
36 import itertools
37 import locale
38 import os
39 import shutil
40 import tarfile
41 from argparse import ArgumentParser
42 from pathlib import Path
43 from subprocess import CalledProcessError, CompletedProcess
44
45 from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT, OUT_DIR, REPRODUCIBLE_DIR
46 from airflow_breeze.utils.run_utils import run_command
47
48
49 def get_source_date_epoch(path: Path):
50 import yaml
51
52 reproducible_build_yaml = path / "reproducible_build.yaml"
53 reproducible_build_dict = yaml.safe_load(reproducible_build_yaml.read_text())
54 source_date_epoch: int = reproducible_build_dict["source-date-epoch"]
55 return source_date_epoch
56
57
58 @contextlib.contextmanager
59 def cd(new_path: Path):
60 """Context manager for changing the current working directory"""
61 previous_path = os.getcwd()
62 try:
63 os.chdir(new_path.as_posix())
64 yield
65 finally:
66 os.chdir(previous_path)
67
68
69 @contextlib.contextmanager
70 def setlocale(name: str):
71 """Context manager for changing the current locale"""
72 saved_locale = locale.setlocale(locale.LC_ALL)
73 try:
74 yield locale.setlocale(locale.LC_ALL, name)
75 finally:
76 locale.setlocale(locale.LC_ALL, saved_locale)
77
78
79 def repack_deterministically(
80 source_archive: Path, dest_archive: Path, prepend_path=None, timestamp=0
81 ) -> CompletedProcess | CalledProcessError:
82 """Repack a .tar.gz archive in a deterministic (reproducible) manner.
83
84 See https://reproducible-builds.org/docs/archives/ for more details."""
85
86 def reset(tarinfo):
87 """Helper to reset owner/group and modification time for tar entries"""
88 tarinfo.uid = tarinfo.gid = 0
89 tarinfo.uname = tarinfo.gname = "root"
90 tarinfo.mtime = timestamp
91 return tarinfo
92
93 OUT_DIR.mkdir(exist_ok=True)
94 shutil.rmtree(REPRODUCIBLE_DIR, ignore_errors=True)
95 REPRODUCIBLE_DIR.mkdir(exist_ok=True)
96
97 result = run_command(
98 [
99 "tar",
100 "-xf",
101 source_archive.as_posix(),
102 "-C",
103 REPRODUCIBLE_DIR.as_posix(),
104 ],
105 check=False,
106 )
107 if result.returncode != 0:
108 return result
109 dest_archive.unlink(missing_ok=True)
110 result = run_command(
111 [
112 "chmod",
113 "-R",
114 "go=",
115 REPRODUCIBLE_DIR.as_posix(),
116 ],
117 check=False,
118 )
119 with cd(REPRODUCIBLE_DIR):
120 current_dir = "."
121 file_list = [current_dir]
122 for root, dirs, files in os.walk(current_dir):
123 for name in itertools.chain(dirs, files):
124 file_list.append(os.path.join(root, name))
125
126 # Sort file entries with the fixed locale
127 with setlocale("C"):
128 file_list.sort(key=locale.strxfrm)
129
130 # Use a temporary file and atomic rename to avoid partially-formed
131 # packaging (in case of exceptional situations like running out of disk space).
132 temp_file = f"{dest_archive}.temp~"
133 with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), "wb") as out_file:
134 with gzip.GzipFile(fileobj=out_file, mtime=0, mode="wb") as gzip_file:
135 with tarfile.open(fileobj=gzip_file, mode="w:") as tar_file: # type: ignore
136 for entry in file_list:
137 arcname = entry
138 if prepend_path is not None:
139 arcname = os.path.normpath(os.path.join(prepend_path, arcname))
140 if arcname.startswith("./"):
141 arcname = arcname[2:]
142 tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)
143 os.rename(temp_file, dest_archive)
144 return result
145
146
147 def main():
148 parser = ArgumentParser()
149 parser.add_argument("-a", "--archive", help="archive to repack")
150 parser.add_argument("-o", "--out", help="archive destination")
151 parser.add_argument("-p", "--prepend", help="prepend path in the archive")
152 parser.add_argument(
153 "-t",
154 "--timestamp",
155 help="timestamp of files",
156 type=int,
157 default=get_source_date_epoch(AIRFLOW_SOURCES_ROOT / "airflow"),
158 )
159
160 args = parser.parse_args()
161
162 if not args.archive or not args.out:
163 error = (
164 "You should provide an archive to repack, and the target "
165 f"archive file name, not {repr((args.archoive, args.out))}"
166 )
167 raise ValueError(error)
168
169 repack_deterministically(
170 source_archive=Path(args.archive),
171 dest_archive=Path(args.out),
172 prepend_path=args.prepend,
173 timestamp=args.timestamp,
174 )
175
176
177 if __name__ == "__main__":
178 main()
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dev/breeze/src/airflow_breeze/utils/reproducible.py b/dev/breeze/src/airflow_breeze/utils/reproducible.py
--- a/dev/breeze/src/airflow_breeze/utils/reproducible.py
+++ b/dev/breeze/src/airflow_breeze/utils/reproducible.py
@@ -137,6 +137,8 @@
arcname = entry
if prepend_path is not None:
arcname = os.path.normpath(os.path.join(prepend_path, arcname))
+ if arcname == ".":
+ continue
if arcname.startswith("./"):
arcname = arcname[2:]
tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)
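
In words, the hunk above skips the walk's own `.` entry before anything is added to the archive. A tiny standalone illustration of how that `arcname` is computed for the `.` entry in the surrounding loop (the prefix value is illustrative, not taken from the chart build):

```python
# Without a prepend_path the "." entry keeps the arcname ".", which is how a
# bare "." member can end up in the repacked archive; the patch skips it.
import os

for prepend_path in (None, "some-prefix"):
    arcname = "."
    if prepend_path is not None:
        arcname = os.path.normpath(os.path.join(prepend_path, arcname))
    if arcname.startswith("./"):
        arcname = arcname[2:]
    print(repr(arcname))  # '.' and then 'some-prefix'
```
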
|
{"golden_diff": "diff --git a/dev/breeze/src/airflow_breeze/utils/reproducible.py b/dev/breeze/src/airflow_breeze/utils/reproducible.py\n--- a/dev/breeze/src/airflow_breeze/utils/reproducible.py\n+++ b/dev/breeze/src/airflow_breeze/utils/reproducible.py\n@@ -137,6 +137,8 @@\n arcname = entry\n if prepend_path is not None:\n arcname = os.path.normpath(os.path.join(prepend_path, arcname))\n+ if arcname == \".\":\n+ continue\n if arcname.startswith(\"./\"):\n arcname = arcname[2:]\n tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)\n", "issue": "Airflow Helm chart fails to deploy with ArgoCD\n### Official Helm Chart version\n\n1.12.0 (latest released)\n\n### Apache Airflow version\n\nlatest\n\n### Kubernetes Version\n\n1.29\n\n### Helm Chart configuration\n\nNo parameters with the simple ArgoCD deployment (5.51.x).\n\n### Docker Image customizations\n\nno\n\n### What happened\n\nArgoCD fails to deploy with `Unable to create application: application spec for airflow-test is invalid: InvalidSpecError: Unable to generate manifests in : rpc error: code = Unknown desc = illegal filepath in archive: /tmp/d629b287-bcfd-4ca1-b827-33aa871dae80`\n\n### What you think should happen instead\n\nArgoCD should be able to create application\n\n### How to reproduce\n\n1. Deploy ArgoCD\n2. Create a simple ArgoCD application from UI\n```\napiVersion: argoproj.io/v1alpha1\nkind: Application\nmetadata:\n name: 'airflow-test'\nspec:\n project: default\n source:\n repoURL: 'https://airflow.apache.org'\n targetRevision: 1.12.0\n helm:\n releaseName: airflow-test\n version: v3\n chart: airflow\n destination:\n server: 'https://kubernetes.default.svc'\n namespace: airflow-test\n syncPolicy:\n automated:\n prune: true\n selfHeal: true\n syncOptions:\n - CreateNamespace=true\n```\n\n### Anything else\n\nIssue in ArgoCD https://github.com/argoproj/argo-cd/issues/17182\n\n### Are you willing to submit PR?\n\n- [x] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [x] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Copyright 2013 The Servo Project Developers.\n# Copyright 2017 zerolib Developers.\n#\n# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or\n# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license\n# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your\n# option. 
This file may not be copied, modified, or distributed\n# except according to those terms.\n\n# This command is a largely vendored-in script from\n# https://github.com/MuxZeroNet/reproducible/blob/master/reproducible.py\nfrom __future__ import annotations\n\nimport contextlib\nimport gzip\nimport itertools\nimport locale\nimport os\nimport shutil\nimport tarfile\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom subprocess import CalledProcessError, CompletedProcess\n\nfrom airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT, OUT_DIR, REPRODUCIBLE_DIR\nfrom airflow_breeze.utils.run_utils import run_command\n\n\ndef get_source_date_epoch(path: Path):\n import yaml\n\n reproducible_build_yaml = path / \"reproducible_build.yaml\"\n reproducible_build_dict = yaml.safe_load(reproducible_build_yaml.read_text())\n source_date_epoch: int = reproducible_build_dict[\"source-date-epoch\"]\n return source_date_epoch\n\n\[email protected]\ndef cd(new_path: Path):\n \"\"\"Context manager for changing the current working directory\"\"\"\n previous_path = os.getcwd()\n try:\n os.chdir(new_path.as_posix())\n yield\n finally:\n os.chdir(previous_path)\n\n\[email protected]\ndef setlocale(name: str):\n \"\"\"Context manager for changing the current locale\"\"\"\n saved_locale = locale.setlocale(locale.LC_ALL)\n try:\n yield locale.setlocale(locale.LC_ALL, name)\n finally:\n locale.setlocale(locale.LC_ALL, saved_locale)\n\n\ndef repack_deterministically(\n source_archive: Path, dest_archive: Path, prepend_path=None, timestamp=0\n) -> CompletedProcess | CalledProcessError:\n \"\"\"Repack a .tar.gz archive in a deterministic (reproducible) manner.\n\n See https://reproducible-builds.org/docs/archives/ for more details.\"\"\"\n\n def reset(tarinfo):\n \"\"\"Helper to reset owner/group and modification time for tar entries\"\"\"\n tarinfo.uid = tarinfo.gid = 0\n tarinfo.uname = tarinfo.gname = \"root\"\n tarinfo.mtime = timestamp\n return tarinfo\n\n OUT_DIR.mkdir(exist_ok=True)\n shutil.rmtree(REPRODUCIBLE_DIR, ignore_errors=True)\n REPRODUCIBLE_DIR.mkdir(exist_ok=True)\n\n result = run_command(\n [\n \"tar\",\n \"-xf\",\n source_archive.as_posix(),\n \"-C\",\n REPRODUCIBLE_DIR.as_posix(),\n ],\n check=False,\n )\n if result.returncode != 0:\n return result\n dest_archive.unlink(missing_ok=True)\n result = run_command(\n [\n \"chmod\",\n \"-R\",\n \"go=\",\n REPRODUCIBLE_DIR.as_posix(),\n ],\n check=False,\n )\n with cd(REPRODUCIBLE_DIR):\n current_dir = \".\"\n file_list = [current_dir]\n for root, dirs, files in os.walk(current_dir):\n for name in itertools.chain(dirs, files):\n file_list.append(os.path.join(root, name))\n\n # Sort file entries with the fixed locale\n with setlocale(\"C\"):\n file_list.sort(key=locale.strxfrm)\n\n # Use a temporary file and atomic rename to avoid partially-formed\n # packaging (in case of exceptional situations like running out of disk space).\n temp_file = f\"{dest_archive}.temp~\"\n with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), \"wb\") as out_file:\n with gzip.GzipFile(fileobj=out_file, mtime=0, mode=\"wb\") as gzip_file:\n with tarfile.open(fileobj=gzip_file, mode=\"w:\") as tar_file: # type: ignore\n for entry in file_list:\n arcname = entry\n if prepend_path is not None:\n arcname = os.path.normpath(os.path.join(prepend_path, arcname))\n if arcname.startswith(\"./\"):\n arcname = arcname[2:]\n tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)\n os.rename(temp_file, dest_archive)\n return result\n\n\ndef 
main():\n parser = ArgumentParser()\n parser.add_argument(\"-a\", \"--archive\", help=\"archive to repack\")\n parser.add_argument(\"-o\", \"--out\", help=\"archive destination\")\n parser.add_argument(\"-p\", \"--prepend\", help=\"prepend path in the archive\")\n parser.add_argument(\n \"-t\",\n \"--timestamp\",\n help=\"timestamp of files\",\n type=int,\n default=get_source_date_epoch(AIRFLOW_SOURCES_ROOT / \"airflow\"),\n )\n\n args = parser.parse_args()\n\n if not args.archive or not args.out:\n error = (\n \"You should provide an archive to repack, and the target \"\n f\"archive file name, not {repr((args.archoive, args.out))}\"\n )\n raise ValueError(error)\n\n repack_deterministically(\n source_archive=Path(args.archive),\n dest_archive=Path(args.out),\n prepend_path=args.prepend,\n timestamp=args.timestamp,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "dev/breeze/src/airflow_breeze/utils/reproducible.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Copyright 2013 The Servo Project Developers.\n# Copyright 2017 zerolib Developers.\n#\n# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or\n# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license\n# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your\n# option. 
This file may not be copied, modified, or distributed\n# except according to those terms.\n\n# This command is a largely vendored-in script from\n# https://github.com/MuxZeroNet/reproducible/blob/master/reproducible.py\nfrom __future__ import annotations\n\nimport contextlib\nimport gzip\nimport itertools\nimport locale\nimport os\nimport shutil\nimport tarfile\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom subprocess import CalledProcessError, CompletedProcess\n\nfrom airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT, OUT_DIR, REPRODUCIBLE_DIR\nfrom airflow_breeze.utils.run_utils import run_command\n\n\ndef get_source_date_epoch(path: Path):\n import yaml\n\n reproducible_build_yaml = path / \"reproducible_build.yaml\"\n reproducible_build_dict = yaml.safe_load(reproducible_build_yaml.read_text())\n source_date_epoch: int = reproducible_build_dict[\"source-date-epoch\"]\n return source_date_epoch\n\n\[email protected]\ndef cd(new_path: Path):\n \"\"\"Context manager for changing the current working directory\"\"\"\n previous_path = os.getcwd()\n try:\n os.chdir(new_path.as_posix())\n yield\n finally:\n os.chdir(previous_path)\n\n\[email protected]\ndef setlocale(name: str):\n \"\"\"Context manager for changing the current locale\"\"\"\n saved_locale = locale.setlocale(locale.LC_ALL)\n try:\n yield locale.setlocale(locale.LC_ALL, name)\n finally:\n locale.setlocale(locale.LC_ALL, saved_locale)\n\n\ndef repack_deterministically(\n source_archive: Path, dest_archive: Path, prepend_path=None, timestamp=0\n) -> CompletedProcess | CalledProcessError:\n \"\"\"Repack a .tar.gz archive in a deterministic (reproducible) manner.\n\n See https://reproducible-builds.org/docs/archives/ for more details.\"\"\"\n\n def reset(tarinfo):\n \"\"\"Helper to reset owner/group and modification time for tar entries\"\"\"\n tarinfo.uid = tarinfo.gid = 0\n tarinfo.uname = tarinfo.gname = \"root\"\n tarinfo.mtime = timestamp\n return tarinfo\n\n OUT_DIR.mkdir(exist_ok=True)\n shutil.rmtree(REPRODUCIBLE_DIR, ignore_errors=True)\n REPRODUCIBLE_DIR.mkdir(exist_ok=True)\n\n result = run_command(\n [\n \"tar\",\n \"-xf\",\n source_archive.as_posix(),\n \"-C\",\n REPRODUCIBLE_DIR.as_posix(),\n ],\n check=False,\n )\n if result.returncode != 0:\n return result\n dest_archive.unlink(missing_ok=True)\n result = run_command(\n [\n \"chmod\",\n \"-R\",\n \"go=\",\n REPRODUCIBLE_DIR.as_posix(),\n ],\n check=False,\n )\n with cd(REPRODUCIBLE_DIR):\n current_dir = \".\"\n file_list = [current_dir]\n for root, dirs, files in os.walk(current_dir):\n for name in itertools.chain(dirs, files):\n file_list.append(os.path.join(root, name))\n\n # Sort file entries with the fixed locale\n with setlocale(\"C\"):\n file_list.sort(key=locale.strxfrm)\n\n # Use a temporary file and atomic rename to avoid partially-formed\n # packaging (in case of exceptional situations like running out of disk space).\n temp_file = f\"{dest_archive}.temp~\"\n with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), \"wb\") as out_file:\n with gzip.GzipFile(fileobj=out_file, mtime=0, mode=\"wb\") as gzip_file:\n with tarfile.open(fileobj=gzip_file, mode=\"w:\") as tar_file: # type: ignore\n for entry in file_list:\n arcname = entry\n if prepend_path is not None:\n arcname = os.path.normpath(os.path.join(prepend_path, arcname))\n if arcname == \".\":\n continue\n if arcname.startswith(\"./\"):\n arcname = arcname[2:]\n tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)\n os.rename(temp_file, 
dest_archive)\n return result\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"-a\", \"--archive\", help=\"archive to repack\")\n parser.add_argument(\"-o\", \"--out\", help=\"archive destination\")\n parser.add_argument(\"-p\", \"--prepend\", help=\"prepend path in the archive\")\n parser.add_argument(\n \"-t\",\n \"--timestamp\",\n help=\"timestamp of files\",\n type=int,\n default=get_source_date_epoch(AIRFLOW_SOURCES_ROOT / \"airflow\"),\n )\n\n args = parser.parse_args()\n\n if not args.archive or not args.out:\n error = (\n \"You should provide an archive to repack, and the target \"\n f\"archive file name, not {repr((args.archoive, args.out))}\"\n )\n raise ValueError(error)\n\n repack_deterministically(\n source_archive=Path(args.archive),\n dest_archive=Path(args.out),\n prepend_path=args.prepend,\n timestamp=args.timestamp,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "dev/breeze/src/airflow_breeze/utils/reproducible.py"}]}
| 2,517 | 161 |
gh_patches_debug_15549
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-5674
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dual distro support broke "securedrop-admin verify"
## Description
When adding support for Focal to the configuration tests in #5529, a check of the `MOLECULE_SCENARIO_NAME` environment variable broke `securedrop-admin verify`, where it's not set.
## Steps to Reproduce
On an admin workstation:
- Run `securedrop-admin setup -t`
- Run `securedrop-admin verify`
## Expected Behavior
That the configuration tests would run.
## Actual Behavior
You get an error [here](https://github.com/freedomofpress/securedrop/blob/76d133a7e5962f8d904e507d93e6a61575358eeb/molecule/testinfra/conftest.py#L31) saying `'NoneType' object has no attribute 'endswith'`.
## Comments
Should probably check if it's `None` or just add `""` as the default in the `os.environ.get` call.
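
A minimal sketch of the guard suggested above (the merged fix may differ; this only shows the effect of giving `os.environ.get` a `""` default so `.endswith()` never runs on `None`):

```python
# With a "" default, an unset MOLECULE_SCENARIO_NAME falls through to the
# non-Focal branch instead of raising AttributeError.
import os

scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "")
python_version = "3.8" if scenario.endswith("focal") else "3.5"
print(python_version)
```
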
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `molecule/testinfra/conftest.py`
Content:
```
1 """
2 Configuration for TestInfra test suite for SecureDrop.
3 Handles importing host-specific test vars, so test functions
4 can be reused across multiple hosts, with varied targets.
5
6 Vars should be placed in `testinfra/vars/<hostname>.yml`.
7 """
8
9 import io
10 import os
11 import yaml
12 import testutils
13
14 # The config tests target staging by default. It's possible to override
15 # for e.g. prod, but the associated vars files are not yet ported.
16 target_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')
17
18
19 def securedrop_import_testinfra_vars(hostname, with_header=False):
20 """
21 Import vars from a YAML file to populate tests with host-specific
22 values used in checks. For instance, the SecureDrop docroot will
23 be under /vagrant in development, but /var/www/securedrop in staging.
24
25 Vars must be stored in `testinfra/vars/<hostname>.yml`.
26 """
27 filepath = os.path.join(os.path.dirname(__file__), "vars", hostname+".yml")
28 with io.open(filepath, 'r') as f:
29 hostvars = yaml.safe_load(f)
30
31 if os.environ.get("MOLECULE_SCENARIO_NAME").endswith("focal"):
32 hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.8") # noqa: E501
33 hostvars['python_version'] = "3.8"
34 else:
35 hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.5") # noqa: E501
36 hostvars['python_version'] = "3.5"
37
38 if with_header:
39 hostvars = dict(securedrop_test_vars=hostvars)
40
41 return hostvars
42
43
44 def lookup_molecule_info():
45 """
46 Molecule automatically writes YAML files documenting dynamic host info
47 such as remote IPs. Read that file and pass back the config dict.
48 """
49 molecule_instance_config_path = os.path.abspath(
50 os.environ['MOLECULE_INSTANCE_CONFIG'])
51 with open(molecule_instance_config_path, 'r') as f:
52 molecule_instance_config = yaml.safe_load(f)
53 return molecule_instance_config
54
55
56 class Myvalues:
57 def __init__(self):
58 pass
59
60
61 value = securedrop_import_testinfra_vars(target_host)
62 res = Myvalues()
63 for key, value in value.items():
64 setattr(res, key, value)
65 testutils.securedrop_test_vars = res
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/molecule/testinfra/conftest.py b/molecule/testinfra/conftest.py
--- a/molecule/testinfra/conftest.py
+++ b/molecule/testinfra/conftest.py
@@ -28,7 +28,16 @@
with io.open(filepath, 'r') as f:
hostvars = yaml.safe_load(f)
- if os.environ.get("MOLECULE_SCENARIO_NAME").endswith("focal"):
+ # Testing against both Focal and Xenial must be supported for now in both
+ # staging scenarios, and in prod via `USE_FOCAL=1 ./securedrop-admin verify`
+ testing_focal = False
+ scenario_env = "MOLECULE_SCENARIO_NAME"
+ if scenario_env in os.environ and os.environ.get(scenario_env).endswith("focal"):
+ testing_focal = True
+ if "USE_FOCAL" in os.environ:
+ testing_focal = True
+
+ if testing_focal:
hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.8") # noqa: E501
hostvars['python_version'] = "3.8"
else:
|
{"golden_diff": "diff --git a/molecule/testinfra/conftest.py b/molecule/testinfra/conftest.py\n--- a/molecule/testinfra/conftest.py\n+++ b/molecule/testinfra/conftest.py\n@@ -28,7 +28,16 @@\n with io.open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n \n- if os.environ.get(\"MOLECULE_SCENARIO_NAME\").endswith(\"focal\"):\n+ # Testing against both Focal and Xenial must be supported for now in both\n+ # staging scenarios, and in prod via `USE_FOCAL=1 ./securedrop-admin verify`\n+ testing_focal = False\n+ scenario_env = \"MOLECULE_SCENARIO_NAME\"\n+ if scenario_env in os.environ and os.environ.get(scenario_env).endswith(\"focal\"):\n+ testing_focal = True\n+ if \"USE_FOCAL\" in os.environ:\n+ testing_focal = True\n+\n+ if testing_focal:\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.8\") # noqa: E501\n hostvars['python_version'] = \"3.8\"\n else:\n", "issue": "Dual distro support broke \"securedrop-admin verify\"\n## Description\r\n\r\nWhen adding support for Focal to the configuration tests in #5529, a check of the `MOLECULE_SCENARIO_NAME` environment variable broke `securedrop-admin verify`, where it's not set.\r\n\r\n## Steps to Reproduce\r\n\r\nOn an admin workstation:\r\n- Run `securedrop-admin setup -t`\r\n- Run `securedrop-admin verify`\r\n\r\n## Expected Behavior\r\n\r\nThat the configuration tests would run.\r\n\r\n## Actual Behavior\r\n\r\nYou get an error [here](https://github.com/freedomofpress/securedrop/blob/76d133a7e5962f8d904e507d93e6a61575358eeb/molecule/testinfra/conftest.py#L31) saying `'NoneType' object has no attribute 'endswith'`. \r\n\r\n## Comments\r\n\r\nShould probably check if it's `None` or just add `\"\"` as the default in the `os.environ.get` call.\n", "before_files": [{"content": "\"\"\"\nConfiguration for TestInfra test suite for SecureDrop.\nHandles importing host-specific test vars, so test functions\ncan be reused across multiple hosts, with varied targets.\n\nVars should be placed in `testinfra/vars/<hostname>.yml`.\n\"\"\"\n\nimport io\nimport os\nimport yaml\nimport testutils\n\n# The config tests target staging by default. It's possible to override\n# for e.g. prod, but the associated vars files are not yet ported.\ntarget_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')\n\n\ndef securedrop_import_testinfra_vars(hostname, with_header=False):\n \"\"\"\n Import vars from a YAML file to populate tests with host-specific\n values used in checks. For instance, the SecureDrop docroot will\n be under /vagrant in development, but /var/www/securedrop in staging.\n\n Vars must be stored in `testinfra/vars/<hostname>.yml`.\n \"\"\"\n filepath = os.path.join(os.path.dirname(__file__), \"vars\", hostname+\".yml\")\n with io.open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n\n if os.environ.get(\"MOLECULE_SCENARIO_NAME\").endswith(\"focal\"):\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.8\") # noqa: E501\n hostvars['python_version'] = \"3.8\"\n else:\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.5\") # noqa: E501\n hostvars['python_version'] = \"3.5\"\n\n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n\n return hostvars\n\n\ndef lookup_molecule_info():\n \"\"\"\n Molecule automatically writes YAML files documenting dynamic host info\n such as remote IPs. 
Read that file and pass back the config dict.\n \"\"\"\n molecule_instance_config_path = os.path.abspath(\n os.environ['MOLECULE_INSTANCE_CONFIG'])\n with open(molecule_instance_config_path, 'r') as f:\n molecule_instance_config = yaml.safe_load(f)\n return molecule_instance_config\n\n\nclass Myvalues:\n def __init__(self):\n pass\n\n\nvalue = securedrop_import_testinfra_vars(target_host)\nres = Myvalues()\nfor key, value in value.items():\n setattr(res, key, value)\ntestutils.securedrop_test_vars = res\n", "path": "molecule/testinfra/conftest.py"}], "after_files": [{"content": "\"\"\"\nConfiguration for TestInfra test suite for SecureDrop.\nHandles importing host-specific test vars, so test functions\ncan be reused across multiple hosts, with varied targets.\n\nVars should be placed in `testinfra/vars/<hostname>.yml`.\n\"\"\"\n\nimport io\nimport os\nimport yaml\nimport testutils\n\n# The config tests target staging by default. It's possible to override\n# for e.g. prod, but the associated vars files are not yet ported.\ntarget_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')\n\n\ndef securedrop_import_testinfra_vars(hostname, with_header=False):\n \"\"\"\n Import vars from a YAML file to populate tests with host-specific\n values used in checks. For instance, the SecureDrop docroot will\n be under /vagrant in development, but /var/www/securedrop in staging.\n\n Vars must be stored in `testinfra/vars/<hostname>.yml`.\n \"\"\"\n filepath = os.path.join(os.path.dirname(__file__), \"vars\", hostname+\".yml\")\n with io.open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n\n # Testing against both Focal and Xenial must be supported for now in both\n # staging scenarios, and in prod via `USE_FOCAL=1 ./securedrop-admin verify`\n testing_focal = False\n scenario_env = \"MOLECULE_SCENARIO_NAME\"\n if scenario_env in os.environ and os.environ.get(scenario_env).endswith(\"focal\"):\n testing_focal = True\n if \"USE_FOCAL\" in os.environ:\n testing_focal = True\n\n if testing_focal:\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.8\") # noqa: E501\n hostvars['python_version'] = \"3.8\"\n else:\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.5\") # noqa: E501\n hostvars['python_version'] = \"3.5\"\n\n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n\n return hostvars\n\n\ndef lookup_molecule_info():\n \"\"\"\n Molecule automatically writes YAML files documenting dynamic host info\n such as remote IPs. Read that file and pass back the config dict.\n \"\"\"\n molecule_instance_config_path = os.path.abspath(\n os.environ['MOLECULE_INSTANCE_CONFIG'])\n with open(molecule_instance_config_path, 'r') as f:\n molecule_instance_config = yaml.safe_load(f)\n return molecule_instance_config\n\n\nclass Myvalues:\n def __init__(self):\n pass\n\n\nvalue = securedrop_import_testinfra_vars(target_host)\nres = Myvalues()\nfor key, value in value.items():\n setattr(res, key, value)\ntestutils.securedrop_test_vars = res\n", "path": "molecule/testinfra/conftest.py"}]}
| 1,159 | 276 |
gh_patches_debug_22141
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-2737
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Child locations up to 1 level down are able to have a site
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.4.5
* Python version: 3.8.10
* Database platform, version: Postgres 12.12
* Middleware(s):
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a parent location and attach it to a site
2. Create a child of the above parent, and attach it to a site
<!-- What did you expect to happen? -->
### Expected Behavior
I expected to be unable to add a child location to a site, as I am unable to do so with child locations two levels down, i.e., the child of a child location.
<!-- What happened instead? -->
### Observed Behavior
I was able to attach a child location to a site
(Two images attached in the original issue are not reproduced here.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/dcim/models/locations.py`
Content:
```
1 from django.contrib.contenttypes.fields import GenericRelation
2 from django.contrib.contenttypes.models import ContentType
3 from django.core.exceptions import ObjectDoesNotExist, ValidationError
4 from django.db import models
5 from django.urls import reverse
6
7 from tree_queries.models import TreeNode
8
9 from nautobot.core.fields import AutoSlugField
10 from nautobot.core.models.generics import OrganizationalModel, PrimaryModel
11 from nautobot.extras.models import StatusModel
12 from nautobot.extras.utils import extras_features, FeatureQuery
13 from nautobot.utilities.fields import NaturalOrderingField
14 from nautobot.utilities.tree_queries import TreeManager
15
16
17 @extras_features(
18 "custom_fields",
19 "custom_links",
20 "custom_validators",
21 "export_templates",
22 "graphql",
23 "relationships",
24 "webhooks",
25 )
26 class LocationType(TreeNode, OrganizationalModel):
27 """
28 Definition of a category of Locations, including its hierarchical relationship to other LocationTypes.
29
30 A LocationType also specifies the content types that can be associated to a Location of this category.
31 For example a "Building" LocationType might allow Prefix and VLANGroup, but not Devices,
32 while a "Room" LocationType might allow Racks and Devices.
33 """
34
35 name = models.CharField(max_length=100, unique=True)
36 slug = AutoSlugField(populate_from="name")
37 description = models.CharField(max_length=200, blank=True)
38 content_types = models.ManyToManyField(
39 to=ContentType,
40 related_name="location_types",
41 verbose_name="Permitted object types",
42 limit_choices_to=FeatureQuery("locations"),
43 help_text="The object type(s) that can be associated to a Location of this type.",
44 )
45
46 objects = TreeManager()
47
48 csv_headers = ["name", "slug", "parent", "description", "content_types"]
49
50 class Meta:
51 ordering = ("name",)
52
53 def __str__(self):
54 return self.name
55
56 def get_absolute_url(self):
57 return reverse("dcim:locationtype", args=[self.slug])
58
59 def to_csv(self):
60 return (
61 self.name,
62 self.slug,
63 self.parent.name if self.parent else None,
64 self.description,
65 ",".join(f"{ct.app_label}.{ct.model}" for ct in self.content_types.order_by("app_label", "model")),
66 )
67
68 def clean(self):
69 """
70 Disallow LocationTypes whose name conflicts with existing location-related models, to avoid confusion.
71
72 In the longer term we will collapse these other models into special cases of LocationType.
73 """
74 super().clean()
75
76 if self.name.lower() in [
77 "region",
78 "regions",
79 "site",
80 "sites",
81 "rackgroup",
82 "rackgroups",
83 "rack group",
84 "rack groups",
85 ]:
86 raise ValidationError({"name": "This name is reserved for future use."})
87
88 @property
89 def display(self):
90 """
91 Include the parent type names as well in order to provide UI clarity.
92 `self.ancestors()` returns all the preceding nodes from the top down.
93 So if we are looking at node C and its node structure is the following:
94 A
95 /
96 B
97 /
98 C
99 This method will return "A → B → C".
100 Note that `self.ancestors()` may throw an `ObjectDoesNotExist` during bulk-delete operations.
101 """
102 display_str = ""
103 try:
104 for ancestor in self.ancestors():
105 display_str += ancestor.name + " → "
106 except ObjectDoesNotExist:
107 pass
108 finally:
109 display_str += self.name
110 return display_str # pylint: disable=lost-exception
111
112
113 @extras_features(
114 "custom_fields",
115 "custom_links",
116 "custom_validators",
117 "export_templates",
118 "graphql",
119 "relationships",
120 "statuses",
121 "webhooks",
122 )
123 class Location(TreeNode, StatusModel, PrimaryModel):
124 """
125 A Location represents an arbitrarily specific geographic location, such as a campus, building, floor, room, etc.
126
127 As presently implemented, Location is an intermediary model between Site and RackGroup - more specific than a Site,
128 less specific (and more broadly applicable) than a RackGroup:
129
130 Region
131 Region
132 Site
133 Location (location_type="Building")
134 Location (location_type="Room")
135 RackGroup
136 Rack
137 Device
138 Device
139 Prefix
140 etc.
141 VLANGroup
142 Prefix
143 etc.
144
145 As such, as presently implemented, every Location either has a parent Location or a "parent" Site.
146
147 In the future, we plan to collapse Region and Site (and likely RackGroup as well) into the Location model.
148 """
149
150 # A Location's name is unique within context of its parent, not globally unique.
151 name = models.CharField(max_length=100, db_index=True)
152 _name = NaturalOrderingField(target_field="name", max_length=100, blank=True, db_index=True)
153 # However a Location's slug *is* globally unique.
154 slug = AutoSlugField(populate_from=["parent__slug", "name"])
155 location_type = models.ForeignKey(
156 to="dcim.LocationType",
157 on_delete=models.PROTECT,
158 related_name="locations",
159 )
160 site = models.ForeignKey(
161 to="dcim.Site",
162 on_delete=models.CASCADE,
163 related_name="locations",
164 blank=True,
165 null=True,
166 )
167 tenant = models.ForeignKey(
168 to="tenancy.Tenant",
169 on_delete=models.PROTECT,
170 related_name="locations",
171 blank=True,
172 null=True,
173 )
174 description = models.CharField(max_length=200, blank=True)
175 images = GenericRelation(to="extras.ImageAttachment")
176
177 objects = TreeManager()
178
179 csv_headers = [
180 "name",
181 "slug",
182 "location_type",
183 "site",
184 "status",
185 "parent",
186 "tenant",
187 "description",
188 ]
189
190 clone_fields = [
191 "location_type",
192 "site",
193 "status",
194 "parent",
195 "tenant",
196 "description",
197 ]
198
199 class Meta:
200 ordering = ("_name",)
201 unique_together = [["parent", "name"]]
202
203 def __str__(self):
204 return self.name
205
206 def get_absolute_url(self):
207 return reverse("dcim:location", args=[self.slug])
208
209 def to_csv(self):
210 return (
211 self.name,
212 self.slug,
213 self.location_type.name,
214 self.site.name if self.site else None,
215 self.get_status_display(),
216 self.parent.name if self.parent else None,
217 self.tenant.name if self.tenant else None,
218 self.description,
219 )
220
221 @property
222 def base_site(self):
223 """The site that this Location belongs to, if any, or that its root ancestor belongs to, if any."""
224 return self.site or self.ancestors().first().site
225
226 @property
227 def display(self):
228 """
229 Location name is unique per parent but not globally unique, so include parent information as context.
230 `self.ancestors()` returns all the preceding nodes from the top down.
231 So if we are looking at node C and its node structure is the following:
232 A
233 /
234 B
235 /
236 C
237 This method will return "A → B → C".
238
239 Note that `self.ancestors()` may throw an `ObjectDoesNotExist` during bulk-delete operations.
240 """
241 display_str = ""
242 try:
243 for ancestor in self.ancestors():
244 display_str += ancestor.name + " → "
245 except ObjectDoesNotExist:
246 pass
247 finally:
248 display_str += self.name
249 return display_str # pylint: disable=lost-exception
250
251 def validate_unique(self, exclude=None):
252 # Check for a duplicate name on a Location with no parent.
253 # This is necessary because Django does not consider two NULL fields to be equal.
254 if self.parent is None:
255 if Location.objects.exclude(pk=self.pk).filter(parent__isnull=True, name=self.name).exists():
256 raise ValidationError({"name": "A root-level location with this name already exists."})
257
258 super().validate_unique(exclude=exclude)
259
260 def clean(self):
261 super().clean()
262
263 # Prevent changing location type as that would require a whole bunch of cascading logic checks,
264 # e.g. what if the new type doesn't allow all of the associated objects that the old type did?
265 if self.present_in_database and self.location_type != Location.objects.get(pk=self.pk).location_type:
266 raise ValidationError({"location_type": "Changing the type of an existing Location is not permitted."})
267
268 if self.location_type.parent is not None:
269 # We must have a parent and it must match the parent location_type.
270 if self.parent is None or self.parent.location_type != self.location_type.parent:
271 raise ValidationError(
272 {
273 "parent": f"A Location of type {self.location_type} must have "
274 f"a parent Location of type {self.location_type.parent}"
275 }
276 )
277 # We must *not* have a site.
278 # In a future release, Site will become a kind of Location, and the resulting data migration will be
279 # much cleaner if it doesn't have to deal with Locations that have two "parents".
280 if self.site is not None:
281 raise ValidationError(
282 {"site": f"A location of type {self.location_type} must not have an associated Site."}
283 )
284
285 # If this location_type does *not* have a parent type,
286 # this location must have an associated Site.
287 # This check will be removed in the future once Site and Region become special cases of Location;
288 # at that point a "root" LocationType will correctly have no parent (or site) associated.
289 if self.location_type.parent is None and self.site is None:
290 raise ValidationError(
291 {"site": f"A Location of type {self.location_type} has no parent Location, but must have a Site."}
292 )
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/dcim/models/locations.py b/nautobot/dcim/models/locations.py
--- a/nautobot/dcim/models/locations.py
+++ b/nautobot/dcim/models/locations.py
@@ -70,6 +70,8 @@
Disallow LocationTypes whose name conflicts with existing location-related models, to avoid confusion.
In the longer term we will collapse these other models into special cases of LocationType.
+
+ Also, disallow re-parenting a LocationType if there are Locations already using this LocationType.
"""
super().clean()
@@ -85,6 +87,18 @@
]:
raise ValidationError({"name": "This name is reserved for future use."})
+ if (
+ self.present_in_database
+ and self.parent != LocationType.objects.get(pk=self.pk).parent
+ and self.locations.exists()
+ ):
+ raise ValidationError(
+ {
+ "parent": "This LocationType currently has Locations using it, "
+ "therefore its parent cannot be changed at this time."
+ }
+ )
+
@property
def display(self):
"""
|
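The guard that the diff adds to `LocationType.clean()` is easiest to see in action. A rough sketch follows, assuming a populated Nautobot instance where a `LocationType` named "Building" already has Locations attached and a "Campus" type exists (both names are hypothetical; only the guard itself comes from the diff above):

```python
# Hypothetical Django shell session; requires a configured Nautobot environment.
from django.core.exceptions import ValidationError
from nautobot.dcim.models import LocationType

building = LocationType.objects.get(name="Building")   # in use by existing Locations
campus = LocationType.objects.get(name="Campus")

building.parent = campus        # attempt to re-parent a type that is in use
try:
    building.full_clean()       # full_clean() runs clean(), which now raises
except ValidationError as exc:
    print(exc.message_dict["parent"])

# A LocationType with no Locations attached can still be re-parented freely,
# since the new check also requires self.locations.exists().
```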
{"golden_diff": "diff --git a/nautobot/dcim/models/locations.py b/nautobot/dcim/models/locations.py\n--- a/nautobot/dcim/models/locations.py\n+++ b/nautobot/dcim/models/locations.py\n@@ -70,6 +70,8 @@\n Disallow LocationTypes whose name conflicts with existing location-related models, to avoid confusion.\n \n In the longer term we will collapse these other models into special cases of LocationType.\n+\n+ Also, disallow re-parenting a LocationType if there are Locations already using this LocationType.\n \"\"\"\n super().clean()\n \n@@ -85,6 +87,18 @@\n ]:\n raise ValidationError({\"name\": \"This name is reserved for future use.\"})\n \n+ if (\n+ self.present_in_database\n+ and self.parent != LocationType.objects.get(pk=self.pk).parent\n+ and self.locations.exists()\n+ ):\n+ raise ValidationError(\n+ {\n+ \"parent\": \"This LocationType currently has Locations using it, \"\n+ \"therefore its parent cannot be changed at this time.\"\n+ }\n+ )\n+\n @property\n def display(self):\n \"\"\"\n", "issue": "Child locations up to 1 level down are able to have a site\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.4.5\r\n* Python version: 3.8.10\r\n* Database platform, version: Postgres 12.12\r\n* Middleware(s):\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n\r\n-->\r\n### Steps to Reproduce\r\n1. Create a parent location and attach it to a site\r\n2. Create a child of the above parent, and attach to a site\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nI expected to be unable to add a child location to a site as I am unable to do so with child locations two levels down, IE the child of a child location.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nI was able to attach a child location to a site\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom tree_queries.models import TreeNode\n\nfrom nautobot.core.fields import AutoSlugField\nfrom nautobot.core.models.generics import OrganizationalModel, PrimaryModel\nfrom nautobot.extras.models import StatusModel\nfrom nautobot.extras.utils import extras_features, FeatureQuery\nfrom nautobot.utilities.fields import NaturalOrderingField\nfrom nautobot.utilities.tree_queries import TreeManager\n\n\n@extras_features(\n \"custom_fields\",\n \"custom_links\",\n \"custom_validators\",\n \"export_templates\",\n \"graphql\",\n \"relationships\",\n \"webhooks\",\n)\nclass LocationType(TreeNode, OrganizationalModel):\n \"\"\"\n Definition of a category of Locations, including its hierarchical relationship to other LocationTypes.\n\n A LocationType also specifies the content types that can be associated to a Location of this category.\n For example a \"Building\" LocationType might allow Prefix and VLANGroup, but not Devices,\n while a \"Room\" LocationType might allow Racks and Devices.\n \"\"\"\n\n name = models.CharField(max_length=100, unique=True)\n slug = AutoSlugField(populate_from=\"name\")\n description = models.CharField(max_length=200, blank=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"location_types\",\n verbose_name=\"Permitted object types\",\n limit_choices_to=FeatureQuery(\"locations\"),\n help_text=\"The object type(s) that can be associated to a Location of this type.\",\n )\n\n objects = TreeManager()\n\n csv_headers = [\"name\", \"slug\", \"parent\", \"description\", \"content_types\"]\n\n class Meta:\n ordering = (\"name\",)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"dcim:locationtype\", args=[self.slug])\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.parent.name if self.parent else None,\n self.description,\n \",\".join(f\"{ct.app_label}.{ct.model}\" for ct in self.content_types.order_by(\"app_label\", \"model\")),\n )\n\n def clean(self):\n \"\"\"\n Disallow LocationTypes whose name conflicts with existing location-related models, to avoid confusion.\n\n In the longer term we will collapse these other models into special cases of LocationType.\n \"\"\"\n super().clean()\n\n if self.name.lower() in [\n \"region\",\n \"regions\",\n \"site\",\n \"sites\",\n \"rackgroup\",\n \"rackgroups\",\n \"rack group\",\n \"rack groups\",\n ]:\n raise ValidationError({\"name\": \"This name is reserved for future use.\"})\n\n @property\n def display(self):\n \"\"\"\n Include the parent type names as well in order to provide UI clarity.\n `self.ancestors()` returns all the preceding nodes from the top down.\n So if we are looking at node C and its node structure is the following:\n A\n /\n B\n /\n C\n This method will return \"A \u2192 B \u2192 C\".\n Note that `self.ancestors()` may throw an `ObjectDoesNotExist` during bulk-delete operations.\n \"\"\"\n display_str = \"\"\n try:\n for ancestor in self.ancestors():\n display_str += ancestor.name + \" \u2192 \"\n except ObjectDoesNotExist:\n pass\n finally:\n display_str += self.name\n return display_str # pylint: 
disable=lost-exception\n\n\n@extras_features(\n \"custom_fields\",\n \"custom_links\",\n \"custom_validators\",\n \"export_templates\",\n \"graphql\",\n \"relationships\",\n \"statuses\",\n \"webhooks\",\n)\nclass Location(TreeNode, StatusModel, PrimaryModel):\n \"\"\"\n A Location represents an arbitrarily specific geographic location, such as a campus, building, floor, room, etc.\n\n As presently implemented, Location is an intermediary model between Site and RackGroup - more specific than a Site,\n less specific (and more broadly applicable) than a RackGroup:\n\n Region\n Region\n Site\n Location (location_type=\"Building\")\n Location (location_type=\"Room\")\n RackGroup\n Rack\n Device\n Device\n Prefix\n etc.\n VLANGroup\n Prefix\n etc.\n\n As such, as presently implemented, every Location either has a parent Location or a \"parent\" Site.\n\n In the future, we plan to collapse Region and Site (and likely RackGroup as well) into the Location model.\n \"\"\"\n\n # A Location's name is unique within context of its parent, not globally unique.\n name = models.CharField(max_length=100, db_index=True)\n _name = NaturalOrderingField(target_field=\"name\", max_length=100, blank=True, db_index=True)\n # However a Location's slug *is* globally unique.\n slug = AutoSlugField(populate_from=[\"parent__slug\", \"name\"])\n location_type = models.ForeignKey(\n to=\"dcim.LocationType\",\n on_delete=models.PROTECT,\n related_name=\"locations\",\n )\n site = models.ForeignKey(\n to=\"dcim.Site\",\n on_delete=models.CASCADE,\n related_name=\"locations\",\n blank=True,\n null=True,\n )\n tenant = models.ForeignKey(\n to=\"tenancy.Tenant\",\n on_delete=models.PROTECT,\n related_name=\"locations\",\n blank=True,\n null=True,\n )\n description = models.CharField(max_length=200, blank=True)\n images = GenericRelation(to=\"extras.ImageAttachment\")\n\n objects = TreeManager()\n\n csv_headers = [\n \"name\",\n \"slug\",\n \"location_type\",\n \"site\",\n \"status\",\n \"parent\",\n \"tenant\",\n \"description\",\n ]\n\n clone_fields = [\n \"location_type\",\n \"site\",\n \"status\",\n \"parent\",\n \"tenant\",\n \"description\",\n ]\n\n class Meta:\n ordering = (\"_name\",)\n unique_together = [[\"parent\", \"name\"]]\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"dcim:location\", args=[self.slug])\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.location_type.name,\n self.site.name if self.site else None,\n self.get_status_display(),\n self.parent.name if self.parent else None,\n self.tenant.name if self.tenant else None,\n self.description,\n )\n\n @property\n def base_site(self):\n \"\"\"The site that this Location belongs to, if any, or that its root ancestor belongs to, if any.\"\"\"\n return self.site or self.ancestors().first().site\n\n @property\n def display(self):\n \"\"\"\n Location name is unique per parent but not globally unique, so include parent information as context.\n `self.ancestors()` returns all the preceding nodes from the top down.\n So if we are looking at node C and its node structure is the following:\n A\n /\n B\n /\n C\n This method will return \"A \u2192 B \u2192 C\".\n\n Note that `self.ancestors()` may throw an `ObjectDoesNotExist` during bulk-delete operations.\n \"\"\"\n display_str = \"\"\n try:\n for ancestor in self.ancestors():\n display_str += ancestor.name + \" \u2192 \"\n except ObjectDoesNotExist:\n pass\n finally:\n display_str += self.name\n return display_str # pylint: disable=lost-exception\n\n def 
validate_unique(self, exclude=None):\n # Check for a duplicate name on a Location with no parent.\n # This is necessary because Django does not consider two NULL fields to be equal.\n if self.parent is None:\n if Location.objects.exclude(pk=self.pk).filter(parent__isnull=True, name=self.name).exists():\n raise ValidationError({\"name\": \"A root-level location with this name already exists.\"})\n\n super().validate_unique(exclude=exclude)\n\n def clean(self):\n super().clean()\n\n # Prevent changing location type as that would require a whole bunch of cascading logic checks,\n # e.g. what if the new type doesn't allow all of the associated objects that the old type did?\n if self.present_in_database and self.location_type != Location.objects.get(pk=self.pk).location_type:\n raise ValidationError({\"location_type\": \"Changing the type of an existing Location is not permitted.\"})\n\n if self.location_type.parent is not None:\n # We must have a parent and it must match the parent location_type.\n if self.parent is None or self.parent.location_type != self.location_type.parent:\n raise ValidationError(\n {\n \"parent\": f\"A Location of type {self.location_type} must have \"\n f\"a parent Location of type {self.location_type.parent}\"\n }\n )\n # We must *not* have a site.\n # In a future release, Site will become a kind of Location, and the resulting data migration will be\n # much cleaner if it doesn't have to deal with Locations that have two \"parents\".\n if self.site is not None:\n raise ValidationError(\n {\"site\": f\"A location of type {self.location_type} must not have an associated Site.\"}\n )\n\n # If this location_type does *not* have a parent type,\n # this location must have an associated Site.\n # This check will be removed in the future once Site and Region become special cases of Location;\n # at that point a \"root\" LocationType will correctly have no parent (or site) associated.\n if self.location_type.parent is None and self.site is None:\n raise ValidationError(\n {\"site\": f\"A Location of type {self.location_type} has no parent Location, but must have a Site.\"}\n )\n", "path": "nautobot/dcim/models/locations.py"}], "after_files": [{"content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom tree_queries.models import TreeNode\n\nfrom nautobot.core.fields import AutoSlugField\nfrom nautobot.core.models.generics import OrganizationalModel, PrimaryModel\nfrom nautobot.extras.models import StatusModel\nfrom nautobot.extras.utils import extras_features, FeatureQuery\nfrom nautobot.utilities.fields import NaturalOrderingField\nfrom nautobot.utilities.tree_queries import TreeManager\n\n\n@extras_features(\n \"custom_fields\",\n \"custom_links\",\n \"custom_validators\",\n \"export_templates\",\n \"graphql\",\n \"relationships\",\n \"webhooks\",\n)\nclass LocationType(TreeNode, OrganizationalModel):\n \"\"\"\n Definition of a category of Locations, including its hierarchical relationship to other LocationTypes.\n\n A LocationType also specifies the content types that can be associated to a Location of this category.\n For example a \"Building\" LocationType might allow Prefix and VLANGroup, but not Devices,\n while a \"Room\" LocationType might allow Racks and Devices.\n \"\"\"\n\n name = models.CharField(max_length=100, unique=True)\n slug = 
AutoSlugField(populate_from=\"name\")\n description = models.CharField(max_length=200, blank=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"location_types\",\n verbose_name=\"Permitted object types\",\n limit_choices_to=FeatureQuery(\"locations\"),\n help_text=\"The object type(s) that can be associated to a Location of this type.\",\n )\n\n objects = TreeManager()\n\n csv_headers = [\"name\", \"slug\", \"parent\", \"description\", \"content_types\"]\n\n class Meta:\n ordering = (\"name\",)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"dcim:locationtype\", args=[self.slug])\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.parent.name if self.parent else None,\n self.description,\n \",\".join(f\"{ct.app_label}.{ct.model}\" for ct in self.content_types.order_by(\"app_label\", \"model\")),\n )\n\n def clean(self):\n \"\"\"\n Disallow LocationTypes whose name conflicts with existing location-related models, to avoid confusion.\n\n In the longer term we will collapse these other models into special cases of LocationType.\n\n Also, disallow re-parenting a LocationType if there are Locations already using this LocationType.\n \"\"\"\n super().clean()\n\n if self.name.lower() in [\n \"region\",\n \"regions\",\n \"site\",\n \"sites\",\n \"rackgroup\",\n \"rackgroups\",\n \"rack group\",\n \"rack groups\",\n ]:\n raise ValidationError({\"name\": \"This name is reserved for future use.\"})\n\n if (\n self.present_in_database\n and self.parent != LocationType.objects.get(pk=self.pk).parent\n and self.locations.exists()\n ):\n raise ValidationError(\n {\n \"parent\": \"This LocationType currently has Locations using it, \"\n \"therefore its parent cannot be changed at this time.\"\n }\n )\n\n @property\n def display(self):\n \"\"\"\n Include the parent type names as well in order to provide UI clarity.\n `self.ancestors()` returns all the preceding nodes from the top down.\n So if we are looking at node C and its node structure is the following:\n A\n /\n B\n /\n C\n This method will return \"A \u2192 B \u2192 C\".\n Note that `self.ancestors()` may throw an `ObjectDoesNotExist` during bulk-delete operations.\n \"\"\"\n display_str = \"\"\n try:\n for ancestor in self.ancestors():\n display_str += ancestor.name + \" \u2192 \"\n except ObjectDoesNotExist:\n pass\n finally:\n display_str += self.name\n return display_str # pylint: disable=lost-exception\n\n\n@extras_features(\n \"custom_fields\",\n \"custom_links\",\n \"custom_validators\",\n \"export_templates\",\n \"graphql\",\n \"relationships\",\n \"statuses\",\n \"webhooks\",\n)\nclass Location(TreeNode, StatusModel, PrimaryModel):\n \"\"\"\n A Location represents an arbitrarily specific geographic location, such as a campus, building, floor, room, etc.\n\n As presently implemented, Location is an intermediary model between Site and RackGroup - more specific than a Site,\n less specific (and more broadly applicable) than a RackGroup:\n\n Region\n Region\n Site\n Location (location_type=\"Building\")\n Location (location_type=\"Room\")\n RackGroup\n Rack\n Device\n Device\n Prefix\n etc.\n VLANGroup\n Prefix\n etc.\n\n As such, as presently implemented, every Location either has a parent Location or a \"parent\" Site.\n\n In the future, we plan to collapse Region and Site (and likely RackGroup as well) into the Location model.\n \"\"\"\n\n # A Location's name is unique within context of its parent, not globally unique.\n name = 
models.CharField(max_length=100, db_index=True)\n _name = NaturalOrderingField(target_field=\"name\", max_length=100, blank=True, db_index=True)\n # However a Location's slug *is* globally unique.\n slug = AutoSlugField(populate_from=[\"parent__slug\", \"name\"])\n location_type = models.ForeignKey(\n to=\"dcim.LocationType\",\n on_delete=models.PROTECT,\n related_name=\"locations\",\n )\n site = models.ForeignKey(\n to=\"dcim.Site\",\n on_delete=models.CASCADE,\n related_name=\"locations\",\n blank=True,\n null=True,\n )\n tenant = models.ForeignKey(\n to=\"tenancy.Tenant\",\n on_delete=models.PROTECT,\n related_name=\"locations\",\n blank=True,\n null=True,\n )\n description = models.CharField(max_length=200, blank=True)\n images = GenericRelation(to=\"extras.ImageAttachment\")\n\n objects = TreeManager()\n\n csv_headers = [\n \"name\",\n \"slug\",\n \"location_type\",\n \"site\",\n \"status\",\n \"parent\",\n \"tenant\",\n \"description\",\n ]\n\n clone_fields = [\n \"location_type\",\n \"site\",\n \"status\",\n \"parent\",\n \"tenant\",\n \"description\",\n ]\n\n class Meta:\n ordering = (\"_name\",)\n unique_together = [[\"parent\", \"name\"]]\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"dcim:location\", args=[self.slug])\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.location_type.name,\n self.site.name if self.site else None,\n self.get_status_display(),\n self.parent.name if self.parent else None,\n self.tenant.name if self.tenant else None,\n self.description,\n )\n\n @property\n def base_site(self):\n \"\"\"The site that this Location belongs to, if any, or that its root ancestor belongs to, if any.\"\"\"\n return self.site or self.ancestors().first().site\n\n @property\n def display(self):\n \"\"\"\n Location name is unique per parent but not globally unique, so include parent information as context.\n `self.ancestors()` returns all the preceding nodes from the top down.\n So if we are looking at node C and its node structure is the following:\n A\n /\n B\n /\n C\n This method will return \"A \u2192 B \u2192 C\".\n\n Note that `self.ancestors()` may throw an `ObjectDoesNotExist` during bulk-delete operations.\n \"\"\"\n display_str = \"\"\n try:\n for ancestor in self.ancestors():\n display_str += ancestor.name + \" \u2192 \"\n except ObjectDoesNotExist:\n pass\n finally:\n display_str += self.name\n return display_str # pylint: disable=lost-exception\n\n def validate_unique(self, exclude=None):\n # Check for a duplicate name on a Location with no parent.\n # This is necessary because Django does not consider two NULL fields to be equal.\n if self.parent is None:\n if Location.objects.exclude(pk=self.pk).filter(parent__isnull=True, name=self.name).exists():\n raise ValidationError({\"name\": \"A root-level location with this name already exists.\"})\n\n super().validate_unique(exclude=exclude)\n\n def clean(self):\n super().clean()\n\n # Prevent changing location type as that would require a whole bunch of cascading logic checks,\n # e.g. 
what if the new type doesn't allow all of the associated objects that the old type did?\n if self.present_in_database and self.location_type != Location.objects.get(pk=self.pk).location_type:\n raise ValidationError({\"location_type\": \"Changing the type of an existing Location is not permitted.\"})\n\n if self.location_type.parent is not None:\n # We must have a parent and it must match the parent location_type.\n if self.parent is None or self.parent.location_type != self.location_type.parent:\n raise ValidationError(\n {\n \"parent\": f\"A Location of type {self.location_type} must have \"\n f\"a parent Location of type {self.location_type.parent}\"\n }\n )\n # We must *not* have a site.\n # In a future release, Site will become a kind of Location, and the resulting data migration will be\n # much cleaner if it doesn't have to deal with Locations that have two \"parents\".\n if self.site is not None:\n raise ValidationError(\n {\"site\": f\"A location of type {self.location_type} must not have an associated Site.\"}\n )\n\n # If this location_type does *not* have a parent type,\n # this location must have an associated Site.\n # This check will be removed in the future once Site and Region become special cases of Location;\n # at that point a \"root\" LocationType will correctly have no parent (or site) associated.\n if self.location_type.parent is None and self.site is None:\n raise ValidationError(\n {\"site\": f\"A Location of type {self.location_type} has no parent Location, but must have a Site.\"}\n )\n", "path": "nautobot/dcim/models/locations.py"}]}
| 3,742 | 254 |
gh_patches_debug_1314
|
rasdani/github-patches
|
git_diff
|
apache__airflow-9699
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TimeSensor triggers immediately when used over midnight (UTC)
<!--
Welcome to Apache Airflow! For a smooth issue process, try to answer the following questions.
Don't worry if they're not all applicable; just try to include what you can :-)
If you need to include code snippets or logs, please put them in fenced code
blocks. If they're super-long, please use the details tag like
<details><summary>super-long log</summary> lots of stuff </details>
Please delete these comment blocks before submitting the issue.
-->
<!--
IMPORTANT!!!
PLEASE CHECK "SIMILAR TO X EXISTING ISSUES" OPTION IF VISIBLE
NEXT TO "SUBMIT NEW ISSUE" BUTTON!!!
PLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!!
Please complete the next sections or the issue will be closed.
These questions are the first thing we need to know to understand the context.
-->
**Apache Airflow version**: 1.10.10 (issue exists in current master as well)
**Environment**: does not seem relevant
**What happened**:
The TimeSensor succeeds as soon as the current UTC time of day is later than the defined target time. Looking at the [source code](https://github.com/apache/airflow/blob/master/airflow/sensors/time_sensor.py), the trigger condition is defined as
```
return timezone.utcnow().time() > self.target_time
```
This leads to problems when the DAG runs over midnight UTC. For example, suppose the following DAG:
```
with DAG('foo',
default_args={'start_date': datetime(2020, 7, 1, tzinfo=pendulum.timezone("Europe/Berlin"))},
schedule_interval="0 0 * * *") as dag:
    # in summer, Europe/Berlin is two hours ahead of UTC, hence:
    time_04h00_local = TimeSensor(task_id="time_04h00", target_time=time(hour=2, minute=00))
```
This DAG will be triggered at 22:00 UTC. Then, according to the trigger rule:
```
22:00 UTC > 2:00 UTC
```
Hence, the TimeSensor will be triggered immediately.
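The comparison can be reproduced without Airflow at all; a minimal sketch with plain `datetime.time` objects, using the 22:00/02:00 values from the example above:

```python
from datetime import time

now_utc = time(hour=22, minute=0)   # poke happens right after the 22:00 UTC DAG start
target = time(hour=2, minute=0)     # target_time the sensor is supposed to wait for

# Comparing bare times of day ignores the date, so 22:00 > 02:00 is True
# and poke() returns True on the very first try.
print(now_utc > target)             # True
```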
**What you expected to happen**:
The TimeSensor should keep waiting and trigger on the following day if `target_time < next_execution_date.time()`
**Possible workarounds**:
One can always use the TimeDeltaSensor to achieve a similar effect. This does result in code that is not as readable, though.
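
A rough sketch of that workaround for the midnight Europe/Berlin DAG above, assuming the Airflow 1.10 import path; the 4-hour delta is an assumption chosen to land on 04:00 local (02:00 UTC):

```python
from datetime import timedelta
from airflow.sensors.time_delta_sensor import TimeDeltaSensor

# Placed inside the `with DAG(...)` block: succeeds once
# execution_date + schedule_interval + delta has passed, i.e. roughly
# 04:00 Europe/Berlin for a run that starts at local midnight.
wait_until_04h00_local = TimeDeltaSensor(
    task_id="wait_until_04h00_local",
    delta=timedelta(hours=4),
)
```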
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/sensors/time_sensor.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 from airflow.sensors.base_sensor_operator import BaseSensorOperator
20 from airflow.utils import timezone
21 from airflow.utils.decorators import apply_defaults
22
23
24 class TimeSensor(BaseSensorOperator):
25 """
26 Waits until the specified time of the day.
27
28 :param target_time: time after which the job succeeds
29 :type target_time: datetime.time
30 """
31
32 @apply_defaults
33 def __init__(self, target_time, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35 self.target_time = target_time
36
37 def poke(self, context):
38 self.log.info('Checking if the time (%s) has come', self.target_time)
39 return timezone.utcnow().time() > self.target_time
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/sensors/time_sensor.py b/airflow/sensors/time_sensor.py
--- a/airflow/sensors/time_sensor.py
+++ b/airflow/sensors/time_sensor.py
@@ -36,4 +36,4 @@
def poke(self, context):
self.log.info('Checking if the time (%s) has come', self.target_time)
- return timezone.utcnow().time() > self.target_time
+ return timezone.make_naive(timezone.utcnow()).time() > self.target_time
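Assuming `make_naive` falls back to the configured `core.default_timezone` when no timezone argument is given (which is how `airflow.utils.timezone` behaves, to the best of my reading), the one-line fix effectively compares local wall-clock time with `target_time`; a sketch:

```python
from datetime import time
from airflow.utils import timezone

target_time = time(hour=2, minute=0)

aware_utc_now = timezone.utcnow()               # timezone-aware, in UTC
local_now = timezone.make_naive(aware_utc_now)  # shifted to the configured default
                                                # timezone, tzinfo dropped

# With default_timezone = Europe/Berlin, a poke at 22:00 UTC sees 00:00 local,
# so 00:00 > 02:00 is False and the sensor keeps waiting instead of firing at once.
print(local_now.time() > target_time)
```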
|
{"golden_diff": "diff --git a/airflow/sensors/time_sensor.py b/airflow/sensors/time_sensor.py\n--- a/airflow/sensors/time_sensor.py\n+++ b/airflow/sensors/time_sensor.py\n@@ -36,4 +36,4 @@\n \n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n- return timezone.utcnow().time() > self.target_time\n+ return timezone.make_naive(timezone.utcnow()).time() > self.target_time\n", "issue": "TimeSensor triggers immediately when used over midnight (UTC)\n<!--\r\n\r\nWelcome to Apache Airflow! For a smooth issue process, try to answer the following questions.\r\nDon't worry if they're not all applicable; just try to include what you can :-)\r\n\r\nIf you need to include code snippets or logs, please put them in fenced code\r\nblocks. If they're super-long, please use the details tag like\r\n<details><summary>super-long log</summary> lots of stuff </details>\r\n\r\nPlease delete these comment blocks before submitting the issue.\r\n\r\n-->\r\n\r\n<!--\r\n\r\nIMPORTANT!!!\r\n\r\nPLEASE CHECK \"SIMILAR TO X EXISTING ISSUES\" OPTION IF VISIBLE\r\nNEXT TO \"SUBMIT NEW ISSUE\" BUTTON!!!\r\n\r\nPLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!!\r\n\r\nPlease complete the next sections or the issue will be closed.\r\nThis questions are the first thing we need to know to understand the context.\r\n\r\n-->\r\n\r\n**Apache Airflow version**: 1.10.10 (issue exists in current master as well)\r\n\r\n**Environment**: does not seem relevant\r\n\r\n**What happened**:\r\n\r\nThe TimeSensor does trigger if the current time is later than the defined trigger time. Looking at the [source code](https://github.com/apache/airflow/blob/master/airflow/sensors/time_sensor.py), the trigger rule is defined as\r\n```\r\nreturn timezone.utcnow().time() > self.target_time\r\n```\r\nThis leads to problems when the DAG runs over midnight UTC. For example, suppose the following DAG:\r\n\r\n```\r\nwith DAG('foo', \r\n default_args={'start_date': datetime(2020, 7, 1, tzinfo=pendulum.timezone(\"Europe/Berlin\"))}, \r\n schedule_interval=\"0 0 * * *\") as dag:\r\n\r\n # in summer, Europe/Berlin is two hours after UTC, hence: \r\n time_04h00_local = TimeSensor(task_id=\"time_01h30\", target_time=time(hour=2, minute=00))\r\n```\r\n\r\nThis DAG will be triggered at 22:00 UTC. Then, according to the trigger rule:\r\n```\r\n22:00 UTC > 2:00 UTC\r\n```\r\nHence, the TimeSensor will be triggered immediately. \r\n\r\n**What you expected to happen**:\r\n\r\nThe TimeSensor should trigger at the following day if `target_time < next_execution_date.time()`\r\n\r\n**Possible workarounds**:\r\n\r\nOne can always use the TimeDeltaSensor to archive similar effects. This does result in code that is not as readable, though. \n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils import timezone\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass TimeSensor(BaseSensorOperator):\n \"\"\"\n Waits until the specified time of the day.\n\n :param target_time: time after which the job succeeds\n :type target_time: datetime.time\n \"\"\"\n\n @apply_defaults\n def __init__(self, target_time, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target_time = target_time\n\n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n return timezone.utcnow().time() > self.target_time\n", "path": "airflow/sensors/time_sensor.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils import timezone\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass TimeSensor(BaseSensorOperator):\n \"\"\"\n Waits until the specified time of the day.\n\n :param target_time: time after which the job succeeds\n :type target_time: datetime.time\n \"\"\"\n\n @apply_defaults\n def __init__(self, target_time, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target_time = target_time\n\n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n return timezone.make_naive(timezone.utcnow()).time() > self.target_time\n", "path": "airflow/sensors/time_sensor.py"}]}
| 1,200 | 114 |
gh_patches_debug_5441
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-724
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Committee positions in dashboard looks ugly as shit
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/authentication/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import socket
5 import urllib
6 import hashlib
7
8 from django.conf import settings
9 from django.contrib.auth.models import AbstractUser
10 from django.db import models
11 from django.utils.translation import ugettext as _
12 from django.utils import timezone
13 from django.utils.html import strip_tags
14
15 import watson
16
17
18 # If this list is changed, remember to check that the year property on
19 # OnlineUser is still correct!
20 FIELD_OF_STUDY_CHOICES = [
21 (0, _(u'Gjest')),
22 (1, _(u'Bachelor i Informatikk (BIT)')),
23 # master degrees take up the interval [10,30]
24 (10, _(u'Software (SW)')),
25 (11, _(u'Informasjonsforvaltning (DIF)')),
26 (12, _(u'Komplekse Datasystemer (KDS)')),
27 (13, _(u'Spillteknologi (SPT)')),
28 (14, _(u'Intelligente Systemer (IRS)')),
29 (15, _(u'Helseinformatikk (MSMEDTEK)')),
30 (30, _(u'Annen mastergrad')),
31 (80, _(u'PhD')),
32 (90, _(u'International')),
33 (100, _(u'Annet Onlinemedlem')),
34 ]
35
36 GENDER_CHOICES = [
37 ("male", _(u"mann")),
38 ("female", _(u"kvinne")),
39 ]
40
41 COMMITTEES = [
42 ('hs', _(u'Hovedstyret')),
43 ('arrkom', _(u'Arrangementskomiteen')),
44 ('bankom', _(u'Bank- og økonomikomiteen')),
45 ('bedkom', _(u'Bedriftskomiteen')),
46 ('dotkom', _(u'Drifts- og utviklingskomiteen')),
47 ('ekskom', _(u'Ekskursjonskomiteen')),
48 ('fagkom', _(u'Fag- og kurskomiteen')),
49 ('jubkom', _(u'Jubileumskomiteen')),
50 ('pangkom', _(u'Pensjonistkomiteen')),
51 ('prokom', _(u'Profil-og aviskomiteen')),
52 ('trikom', _(u'Trivselskomiteen')),
53 ('velkom', _(u'Velkomstkomiteen')),
54 ]
55
56 POSITIONS = [
57 ('medlem', _(u'Medlem')),
58 ('leder', _(u'Leder')),
59 ('nestleder', _(u'Nestleder')),
60 ('okonomiansvarlig', _(u'Økonomiansvarlig')),
61 ]
62
63 class OnlineUser(AbstractUser):
64
65 IMAGE_FOLDER = "images/profiles"
66 IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']
67
68 # Online related fields
69 field_of_study = models.SmallIntegerField(_(u"studieretning"), choices=FIELD_OF_STUDY_CHOICES, default=0)
70 started_date = models.DateField(_(u"startet studie"), default=timezone.now().date())
71 compiled = models.BooleanField(_(u"kompilert"), default=False)
72
73 # Email
74 infomail = models.BooleanField(_(u"vil ha infomail"), default=True)
75
76 # Address
77 phone_number = models.CharField(_(u"telefonnummer"), max_length=20, blank=True, null=True)
78 address = models.CharField(_(u"adresse"), max_length=30, blank=True, null=True)
79 zip_code = models.CharField(_(u"postnummer"), max_length=4, blank=True, null=True)
80
81 # Other
82 allergies = models.TextField(_(u"allergier"), blank=True, null=True)
83 mark_rules = models.BooleanField(_(u"godtatt prikkeregler"), default=False)
84 rfid = models.CharField(_(u"RFID"), max_length=50, blank=True, null=True)
85 nickname = models.CharField(_(u"nickname"), max_length=50, blank=True, null=True)
86 website = models.URLField(_(u"hjemmeside"), blank=True, null=True)
87 gender = models.CharField(_(u"kjønn"), max_length=10, choices=GENDER_CHOICES, default="male")
88
89 # NTNU credentials
90 ntnu_username = models.CharField(_(u"NTNU-brukernavn"), max_length=10, blank=True, null=True, unique=True)
91
92 # TODO checkbox for forwarding of @online.ntnu.no mail
93
94 @property
95 def is_member(self):
96 """
97 Returns true if the User object is associated with Online.
98 """
99 if self.ntnu_username:
100 if AllowedUsername.objects.filter(username=self.ntnu_username.lower()).filter(expiration_date__gte=timezone.now()).count() > 0:
101 return True
102 return False
103
104 def get_full_name(self):
105 """
106 Returns the first_name plus the last_name, with a space in between.
107 """
108 full_name = u'%s %s' % (self.first_name, self.last_name)
109 return full_name.strip()
110
111 def get_email(self):
112 email = self.get_emails().filter(primary = True)
113 if email:
114 return email[0]
115 return None
116
117 def get_emails(self):
118 return Email.objects.all().filter(user = self)
119
120 def in_group(self, group_name):
121 return reduce(lambda x,y: x or y.name == group_name, self.groups.all(), False)
122
123 @property
124 def year(self):
125 today = timezone.now().date()
126 started = self.started_date
127
128 # We say that a year is 360 days incase we are a bit slower to
129 # add users one year.
130 year = ((today - started).days / 360) + 1
131
132 if self.field_of_study == 0 or self.field_of_study == 100: # others
133 return 0
134 # dont return a bachelor student as 4th or 5th grade
135 elif self.field_of_study == 1: # bachelor
136 if year > 3:
137 return 3
138 return year
139 elif 10 <= self.field_of_study <= 30: # 10-29 is considered master
140 if year >= 2:
141 return 5
142 return 4
143 elif self.field_of_study == 80: # phd
144 return year + 5
145 elif self.field_of_study == 90: # international
146 if year == 1:
147 return 1
148 return 4
149 # If user's field of study is not matched by any of these tests, return -1
150 else:
151 return -1
152
153 @models.permalink
154 def get_absolute_url(self):
155 return ('profiles_view', None, {'username': self.username})
156
157 def __unicode__(self):
158 return self.get_full_name()
159
160 def save(self, *args, **kwargs):
161 if self.ntnu_username == "":
162 self.ntnu_username = None
163 super(OnlineUser, self).save(*args, **kwargs)
164
165 def serializable_object(self):
166 if self.privacy.expose_phone_number:
167 phone = self.phone_number
168 else:
169 phone = "Ikke tilgjengelig"
170
171 return {
172 'id': self.id,
173 'phone': strip_tags(phone),
174 'username': strip_tags(self.username),
175 'value': strip_tags(self.get_full_name()), # typeahead
176 'name': strip_tags(self.get_full_name()),
177 'image': self.get_image_url(),
178 }
179
180 def get_image_url(self, size=50):
181 default = "%s%s_%s.png" % (settings.BASE_URL,
182 settings.DEFAULT_PROFILE_PICTURE_PREFIX, self.gender)
183
184 gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(self.email).hexdigest() + "?"
185 gravatar_url += urllib.urlencode({'d': default, 's':str(size)})
186 return gravatar_url
187
188 class Meta:
189 ordering = ['first_name', 'last_name']
190 verbose_name = _(u"brukerprofil")
191 verbose_name_plural = _(u"brukerprofiler")
192
193
194 class Email(models.Model):
195 user = models.ForeignKey(OnlineUser, related_name="email_user")
196 email = models.EmailField(_(u"epostadresse"), unique=True)
197 primary = models.BooleanField(_(u"primær"), default=False)
198 verified = models.BooleanField(_(u"verifisert"), default=False, editable=False)
199
200 def save(self, *args, **kwargs):
201 primary_email = self.user.get_email()
202 if not primary_email:
203 self.primary = True
204 elif primary_email.email != self.email:
205 self.primary = False
206 self.email = self.email.lower()
207 if self.primary:
208 self.user.email = self.email
209 self.user.save()
210 super(Email, self).save(*args, **kwargs)
211
212 def __unicode__(self):
213 return self.email
214
215 class Meta:
216 verbose_name = _(u"epostadresse")
217 verbose_name_plural = _(u"epostadresser")
218
219
220 class RegisterToken(models.Model):
221 user = models.ForeignKey(OnlineUser, related_name="register_user")
222 email = models.EmailField(_(u"epost"), max_length=254)
223 token = models.CharField(_(u"token"), max_length=32)
224 created = models.DateTimeField(_(u"opprettet dato"), editable=False, auto_now_add=True)
225
226 @property
227 def is_valid(self):
228 valid_period = datetime.timedelta(days=1)
229 now = timezone.now()
230 return now < self.created + valid_period
231
232
233 class AllowedUsername(models.Model):
234 """
235 Holds usernames that are considered valid members of Online and the time they expire.
236 """
237 username = models.CharField(_(u"NTNU-brukernavn"), max_length=10, unique=True)
238 registered = models.DateField(_(u"registrert"))
239 note = models.CharField(_(u"notat"), max_length=100)
240 description = models.TextField(_(u"beskrivelse"), blank=True, null=True)
241 expiration_date = models.DateField(_(u"utløpsdato"))
242
243 @property
244 def is_active(self):
245 return timezone.now().date() < self.expiration_date
246
247 def save(self, *args, **kwargs):
248 self.username = self.username.lower()
249 super(AllowedUsername, self).save(*args, **kwargs)
250
251 def __unicode__(self):
252 return self.username
253
254 class Meta:
255 verbose_name = _(u"medlem")
256 verbose_name_plural = _(u"medlemsregister")
257 ordering = (u"username",)
258
259
260 class Position(models.Model):
261 """
262 Contains a users position in the organization from a given year
263 """
264 period = models.CharField(_(u'periode'), max_length=9, default="2013-2014", blank=False)
265 committee = models.CharField(_(u"komite"), max_length=10, choices=COMMITTEES, default="hs")
266 position = models.CharField(_(u"stilling"), max_length=10, choices=POSITIONS, default="medlem")
267 user = models.ForeignKey(OnlineUser, related_name='positions', blank=False)
268
269 @property
270 def print_string(self):
271 return '%s: %s(%s)' % (self.period, self.committee, self.position)
272
273 def __unicode__(self):
274 return self.print_string
275
276 class Meta:
277 verbose_name = _(u'posisjon')
278 verbose_name_plural = _(u'posisjoner')
279 ordering = (u'user',)
280
281 # Static method for resetting all users mark rules accepted field to false due to changes in mark rules
282 def reset_marks_acceptance():
283 for user in OnlineUser.objects.all():
284 user.mark_rules = False
285 user.save()
286
287 # Register OnlineUser in watson index for searching
288 watson.register(OnlineUser)
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/authentication/models.py b/apps/authentication/models.py
--- a/apps/authentication/models.py
+++ b/apps/authentication/models.py
@@ -276,7 +276,7 @@
class Meta:
verbose_name = _(u'posisjon')
verbose_name_plural = _(u'posisjoner')
- ordering = (u'user',)
+ ordering = (u'user', u'period', )
# Static method for resetting all users mark rules accepted field to false due to changes in mark rules
def reset_marks_acceptance():
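As a tiny, self-contained analogue of what the extra `period` ordering key does (made-up data, plain Python rather than the ORM):

```python
# Mimics Meta.ordering = ('user', 'period'): rows are grouped per user and,
# within each user, the period strings sort chronologically
# ("2012-2013" < "2013-2014"), so the dashboard lists positions in order.
positions = [
    ("kari",    "2014-2015", "prokom", "leder"),
    ("olanord", "2013-2014", "dotkom", "medlem"),
    ("olanord", "2012-2013", "trikom", "medlem"),
]
for user, period, committee, role in sorted(positions):
    print(user, period, committee, role)
```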
|
{"golden_diff": "diff --git a/apps/authentication/models.py b/apps/authentication/models.py\n--- a/apps/authentication/models.py\n+++ b/apps/authentication/models.py\n@@ -276,7 +276,7 @@\n class Meta:\n verbose_name = _(u'posisjon')\n verbose_name_plural = _(u'posisjoner')\n- ordering = (u'user',)\n+ ordering = (u'user', u'period', )\n \n # Static method for resetting all users mark rules accepted field to false due to changes in mark rules\n def reset_marks_acceptance():\n", "issue": "Committee positions in dashboard looks ugly as shit\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport socket\nimport urllib\nimport hashlib\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\nfrom django.utils.html import strip_tags\n\nimport watson\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30]\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nGENDER_CHOICES = [\n (\"male\", _(u\"mann\")),\n (\"female\", _(u\"kvinne\")),\n]\n\nCOMMITTEES = [\n ('hs', _(u'Hovedstyret')),\n ('arrkom', _(u'Arrangementskomiteen')),\n ('bankom', _(u'Bank- og \u00f8konomikomiteen')),\n ('bedkom', _(u'Bedriftskomiteen')),\n ('dotkom', _(u'Drifts- og utviklingskomiteen')),\n ('ekskom', _(u'Ekskursjonskomiteen')),\n ('fagkom', _(u'Fag- og kurskomiteen')),\n ('jubkom', _(u'Jubileumskomiteen')),\n ('pangkom', _(u'Pensjonistkomiteen')),\n ('prokom', _(u'Profil-og aviskomiteen')),\n ('trikom', _(u'Trivselskomiteen')),\n ('velkom', _(u'Velkomstkomiteen')),\n]\n\nPOSITIONS = [\n ('medlem', _(u'Medlem')),\n ('leder', _(u'Leder')),\n ('nestleder', _(u'Nestleder')),\n ('okonomiansvarlig', _(u'\u00d8konomiansvarlig')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n gender = models.CharField(_(u\"kj\u00f8nn\"), max_length=10, 
choices=GENDER_CHOICES, default=\"male\")\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True, unique=True)\n\n # TODO checkbox for forwarding of @online.ntnu.no mail\n\n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if self.ntnu_username:\n if AllowedUsername.objects.filter(username=self.ntnu_username.lower()).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n email = self.get_emails().filter(primary = True)\n if email:\n return email[0]\n return None\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n def in_group(self, group_name):\n return reduce(lambda x,y: x or y.name == group_name, self.groups.all(), False)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 10 <= self.field_of_study <= 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n # If user's field of study is not matched by any of these tests, return -1\n else:\n return -1\n\n @models.permalink\n def get_absolute_url(self):\n return ('profiles_view', None, {'username': self.username})\n\n def __unicode__(self):\n return self.get_full_name()\n\n def save(self, *args, **kwargs):\n if self.ntnu_username == \"\":\n self.ntnu_username = None\n super(OnlineUser, self).save(*args, **kwargs)\n\n def serializable_object(self):\n if self.privacy.expose_phone_number:\n phone = self.phone_number\n else:\n phone = \"Ikke tilgjengelig\"\n\n return {\n 'id': self.id,\n 'phone': strip_tags(phone),\n 'username': strip_tags(self.username),\n 'value': strip_tags(self.get_full_name()), # typeahead\n 'name': strip_tags(self.get_full_name()),\n 'image': self.get_image_url(),\n }\n\n def get_image_url(self, size=50):\n default = \"%s%s_%s.png\" % (settings.BASE_URL,\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, self.gender)\n\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(self.email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n return gravatar_url\n\n class Meta:\n ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"prim\u00e6r\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False, editable=False)\n\n def save(self, *args, **kwargs):\n primary_email = self.user.get_email()\n if not primary_email:\n self.primary = True\n elif primary_email.email != self.email:\n self.primary = False\n self.email = 
self.email.lower()\n if self.primary:\n self.user.email = self.email\n self.user.save()\n super(Email, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, unique=True)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def save(self, *args, **kwargs):\n self.username = self.username.lower()\n super(AllowedUsername, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"medlem\")\n verbose_name_plural = _(u\"medlemsregister\")\n ordering = (u\"username\",)\n\n\nclass Position(models.Model):\n \"\"\"\n Contains a users position in the organization from a given year\n \"\"\"\n period = models.CharField(_(u'periode'), max_length=9, default=\"2013-2014\", blank=False)\n committee = models.CharField(_(u\"komite\"), max_length=10, choices=COMMITTEES, default=\"hs\")\n position = models.CharField(_(u\"stilling\"), max_length=10, choices=POSITIONS, default=\"medlem\")\n user = models.ForeignKey(OnlineUser, related_name='positions', blank=False)\n\n @property\n def print_string(self):\n return '%s: %s(%s)' % (self.period, self.committee, self.position)\n\n def __unicode__(self):\n return self.print_string\n\n class Meta:\n verbose_name = _(u'posisjon')\n verbose_name_plural = _(u'posisjoner')\n ordering = (u'user',)\n\n# Static method for resetting all users mark rules accepted field to false due to changes in mark rules\ndef reset_marks_acceptance():\n for user in OnlineUser.objects.all():\n user.mark_rules = False\n user.save()\n\n# Register OnlineUser in watson index for searching\nwatson.register(OnlineUser)\n", "path": "apps/authentication/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport socket\nimport urllib\nimport hashlib\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\nfrom django.utils.html import strip_tags\n\nimport watson\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30]\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer 
(IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nGENDER_CHOICES = [\n (\"male\", _(u\"mann\")),\n (\"female\", _(u\"kvinne\")),\n]\n\nCOMMITTEES = [\n ('hs', _(u'Hovedstyret')),\n ('arrkom', _(u'Arrangementskomiteen')),\n ('bankom', _(u'Bank- og \u00f8konomikomiteen')),\n ('bedkom', _(u'Bedriftskomiteen')),\n ('dotkom', _(u'Drifts- og utviklingskomiteen')),\n ('ekskom', _(u'Ekskursjonskomiteen')),\n ('fagkom', _(u'Fag- og kurskomiteen')),\n ('jubkom', _(u'Jubileumskomiteen')),\n ('pangkom', _(u'Pensjonistkomiteen')),\n ('prokom', _(u'Profil-og aviskomiteen')),\n ('trikom', _(u'Trivselskomiteen')),\n ('velkom', _(u'Velkomstkomiteen')),\n]\n\nPOSITIONS = [\n ('medlem', _(u'Medlem')),\n ('leder', _(u'Leder')),\n ('nestleder', _(u'Nestleder')),\n ('okonomiansvarlig', _(u'\u00d8konomiansvarlig')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n gender = models.CharField(_(u\"kj\u00f8nn\"), max_length=10, choices=GENDER_CHOICES, default=\"male\")\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True, unique=True)\n\n # TODO checkbox for forwarding of @online.ntnu.no mail\n\n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if self.ntnu_username:\n if AllowedUsername.objects.filter(username=self.ntnu_username.lower()).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n email = self.get_emails().filter(primary = True)\n if email:\n return email[0]\n return None\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n def in_group(self, group_name):\n return reduce(lambda x,y: x or y.name == group_name, self.groups.all(), False)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont 
return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 10 <= self.field_of_study <= 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n # If user's field of study is not matched by any of these tests, return -1\n else:\n return -1\n\n @models.permalink\n def get_absolute_url(self):\n return ('profiles_view', None, {'username': self.username})\n\n def __unicode__(self):\n return self.get_full_name()\n\n def save(self, *args, **kwargs):\n if self.ntnu_username == \"\":\n self.ntnu_username = None\n super(OnlineUser, self).save(*args, **kwargs)\n\n def serializable_object(self):\n if self.privacy.expose_phone_number:\n phone = self.phone_number\n else:\n phone = \"Ikke tilgjengelig\"\n\n return {\n 'id': self.id,\n 'phone': strip_tags(phone),\n 'username': strip_tags(self.username),\n 'value': strip_tags(self.get_full_name()), # typeahead\n 'name': strip_tags(self.get_full_name()),\n 'image': self.get_image_url(),\n }\n\n def get_image_url(self, size=50):\n default = \"%s%s_%s.png\" % (settings.BASE_URL,\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, self.gender)\n\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(self.email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n return gravatar_url\n\n class Meta:\n ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"prim\u00e6r\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False, editable=False)\n\n def save(self, *args, **kwargs):\n primary_email = self.user.get_email()\n if not primary_email:\n self.primary = True\n elif primary_email.email != self.email:\n self.primary = False\n self.email = self.email.lower()\n if self.primary:\n self.user.email = self.email\n self.user.save()\n super(Email, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, unique=True)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def save(self, *args, **kwargs):\n self.username = 
self.username.lower()\n super(AllowedUsername, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"medlem\")\n verbose_name_plural = _(u\"medlemsregister\")\n ordering = (u\"username\",)\n\n\nclass Position(models.Model):\n \"\"\"\n Contains a users position in the organization from a given year\n \"\"\"\n period = models.CharField(_(u'periode'), max_length=9, default=\"2013-2014\", blank=False)\n committee = models.CharField(_(u\"komite\"), max_length=10, choices=COMMITTEES, default=\"hs\")\n position = models.CharField(_(u\"stilling\"), max_length=10, choices=POSITIONS, default=\"medlem\")\n user = models.ForeignKey(OnlineUser, related_name='positions', blank=False)\n\n @property\n def print_string(self):\n return '%s: %s(%s)' % (self.period, self.committee, self.position)\n\n def __unicode__(self):\n return self.print_string\n\n class Meta:\n verbose_name = _(u'posisjon')\n verbose_name_plural = _(u'posisjoner')\n ordering = (u'user', u'period', )\n\n# Static method for resetting all users mark rules accepted field to false due to changes in mark rules\ndef reset_marks_acceptance():\n for user in OnlineUser.objects.all():\n user.mark_rules = False\n user.save()\n\n# Register OnlineUser in watson index for searching\nwatson.register(OnlineUser)\n", "path": "apps/authentication/models.py"}]}
| 3,673 | 118 |
gh_patches_debug_6124
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3087
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
package_id() regression bug in conan 1.4.x?
Consider the following `conanfile.py`:
```python
from conans import ConanFile
class TestConan(ConanFile):
name = "Test"
version = "0.0.1"
settings = "os", "arch"
def package_id(self):
self.info.include_build_settings()
self.info.settings.os_build = self.info.settings.os
self.info.settings.arch_build = self.info.settings.arch
del self.info.settings.os
del self.info.settings.arch
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: ",str(self.info.settings.os_build))
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: ",str(self.info.settings.arch_build))
```
Now test it with conan 1.3.3:
```
C:\Users\dbely\conan\conan-test>pip install conan==1.3.3
...
C:\Users\dbely\conan\conan-test>conan create . dbely/testing
...
Test/0.0.1@dbely/testing: The stored package has not changed
>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows
>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64
Test/0.0.1@dbely/testing: Installing package
...
C:\Users\dbely\conan\conan-test>conan search Test/0.0.1@dbely/testing
Existing packages for recipe Test/0.0.1@dbely/testing:
Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93
[settings]
arch_build: x86_64
os_build: Windows
Outdated from recipe: False
```
Everything is good. Upgrade to conan 1.4.4 (all 1.4.x versions behave the same) and try again:
```
C:\Users\dbely\conan\conan-test>pip install conan==1.4.4
...
C:\Users\dbely\conan\conan-test>conan create . dbely/testing
...
Test/0.0.1@dbely/testing: A new conanfile.py version was exported
Test/0.0.1@dbely/testing: Folder: C:\Users\dbely\.conan\data\Test\0.0.1\dbely\testing\export
>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows
>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64
Test/0.0.1@dbely/testing: Installing package
>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: None
>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: None
...
C:\Users\dbely\conan\conan-test>conan search Test/0.0.1@dbely/testing
Existing packages for recipe Test/0.0.1@dbely/testing:
Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93
[settings]
arch_build: None
os_build: None
Outdated from recipe: False
```
Oops! `package_id()` is now called twice and after the second call `os_build` and `arch_build` are set to `None`. Looks like a bug to me.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/graph/printer.py`
Content:
```
1 from conans.client.output import Color
2 from conans.model.ref import PackageReference
3 from conans.model.workspace import WORKSPACE_FILE
4
5
6 def print_graph(deps_graph, out):
7 all_nodes = []
8 ids = set()
9 for node in sorted(n for n in deps_graph.nodes if n.conan_ref):
10 package_id = PackageReference(node.conan_ref, node.conanfile.package_id())
11 if package_id not in ids:
12 all_nodes.append(node)
13 ids.add(package_id)
14 requires = [n for n in all_nodes]
15 out.writeln("Requirements", Color.BRIGHT_YELLOW)
16
17 def _recipes(nodes):
18 for node in nodes:
19 if node.remote == WORKSPACE_FILE:
20 from_text = "from '%s'" % WORKSPACE_FILE
21 else:
22 from_text = "from local cache" if not node.remote else "from '%s'" % node.remote.name
23 out.writeln(" %s %s" % (repr(node.conan_ref), from_text), Color.BRIGHT_CYAN)
24 _recipes(requires)
25 out.writeln("Packages", Color.BRIGHT_YELLOW)
26
27 def _packages(nodes):
28 for node in nodes:
29 ref, conanfile = node.conan_ref, node.conanfile
30 ref = PackageReference(ref, conanfile.info.package_id())
31 out.writeln(" %s" % (repr(ref)), Color.BRIGHT_CYAN)
32 _packages(requires)
33
34 out.writeln("")
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/graph/printer.py b/conans/client/graph/printer.py
--- a/conans/client/graph/printer.py
+++ b/conans/client/graph/printer.py
@@ -7,7 +7,7 @@
all_nodes = []
ids = set()
for node in sorted(n for n in deps_graph.nodes if n.conan_ref):
- package_id = PackageReference(node.conan_ref, node.conanfile.package_id())
+ package_id = PackageReference(node.conan_ref, node.conanfile.info.package_id())
if package_id not in ids:
all_nodes.append(node)
ids.add(package_id)
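
The fix reads the identifier already computed on `conanfile.info` instead of invoking the recipe's `package_id()` hook a second time. The hook in the issue *moves* settings (`os` → `os_build`, then deletes `os`), so it is not idempotent; the toy sketch below (plain Python, not Conan internals — class and attribute names are illustrative only) shows why a repeated call ends up with `None`:

```python
# Toy illustration (not Conan code): a package_id() hook that moves values out
# of settings is not idempotent, so calling it a second time overwrites the
# values it already moved with None.
class Info:
    def __init__(self):
        self.settings = {"os": "Windows", "arch": "x86_64"}


class Recipe:
    def __init__(self):
        self.info = Info()

    def package_id(self):  # stand-in for the user-defined hook in the issue
        s = self.info.settings
        s["os_build"] = s.pop("os", None)      # 2nd call: "os" is gone -> None
        s["arch_build"] = s.pop("arch", None)  # 2nd call: "arch" is gone -> None
        return sorted(s.items())               # stand-in for the real ID hash


r = Recipe()
print(r.package_id())  # [('arch_build', 'x86_64'), ('os_build', 'Windows')]
print(r.package_id())  # [('arch_build', None), ('os_build', None)]
```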
|
{"golden_diff": "diff --git a/conans/client/graph/printer.py b/conans/client/graph/printer.py\n--- a/conans/client/graph/printer.py\n+++ b/conans/client/graph/printer.py\n@@ -7,7 +7,7 @@\n all_nodes = []\n ids = set()\n for node in sorted(n for n in deps_graph.nodes if n.conan_ref):\n- package_id = PackageReference(node.conan_ref, node.conanfile.package_id())\n+ package_id = PackageReference(node.conan_ref, node.conanfile.info.package_id())\n if package_id not in ids:\n all_nodes.append(node)\n ids.add(package_id)\n", "issue": "package_id() regression bug in conan 1.4.x?\nConsider the following `conanfile.py`:\r\n```python\r\nfrom conans import ConanFile\r\n\r\nclass TestConan(ConanFile):\r\n name = \"Test\"\r\n version = \"0.0.1\"\r\n settings = \"os\", \"arch\"\r\n\r\n def package_id(self):\r\n self.info.include_build_settings()\r\n self.info.settings.os_build = self.info.settings.os\r\n self.info.settings.arch_build = self.info.settings.arch\r\n del self.info.settings.os\r\n del self.info.settings.arch\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: \",str(self.info.settings.os_build))\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: \",str(self.info.settings.arch_build))\r\n```\r\n\r\nNow test it with conan 1.3.3:\r\n```\r\nC:\\Users\\dbely\\conan\\conan-test>pip install conan==1.3.3\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan create . dbely/testing\r\n...\r\nTest/0.0.1@dbely/testing: The stored package has not changed\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64\r\nTest/0.0.1@dbely/testing: Installing package\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan search Test/0.0.1@dbely/testing\r\nExisting packages for recipe Test/0.0.1@dbely/testing:\r\n\r\n Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93\r\n [settings]\r\n arch_build: x86_64\r\n os_build: Windows\r\n Outdated from recipe: False\r\n```\r\nEverything is good. Upgrade to conan 1.4.4 (all 1.4.x versions behave the same) and try again:\r\n```\r\nC:\\Users\\dbely\\conan\\conan-test>pip install conan==1.4.4\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan create . dbely/testing\r\n...\r\nTest/0.0.1@dbely/testing: A new conanfile.py version was exported\r\nTest/0.0.1@dbely/testing: Folder: C:\\Users\\dbely\\.conan\\data\\Test\\0.0.1\\dbely\\testing\\export\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64\r\nTest/0.0.1@dbely/testing: Installing package\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: None\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: None\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan search Test/0.0.1@dbely/testing\r\nExisting packages for recipe Test/0.0.1@dbely/testing:\r\n\r\n Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93\r\n [settings]\r\n arch_build: None\r\n os_build: None\r\n Outdated from recipe: False\r\n```\r\nOops! `package_id()` is now called twice and after the second call `os_build` and `arch_build` are set to `None`. Looks like a bug to me. 
\n", "before_files": [{"content": "from conans.client.output import Color\nfrom conans.model.ref import PackageReference\nfrom conans.model.workspace import WORKSPACE_FILE\n\n\ndef print_graph(deps_graph, out):\n all_nodes = []\n ids = set()\n for node in sorted(n for n in deps_graph.nodes if n.conan_ref):\n package_id = PackageReference(node.conan_ref, node.conanfile.package_id())\n if package_id not in ids:\n all_nodes.append(node)\n ids.add(package_id)\n requires = [n for n in all_nodes]\n out.writeln(\"Requirements\", Color.BRIGHT_YELLOW)\n\n def _recipes(nodes):\n for node in nodes:\n if node.remote == WORKSPACE_FILE:\n from_text = \"from '%s'\" % WORKSPACE_FILE\n else:\n from_text = \"from local cache\" if not node.remote else \"from '%s'\" % node.remote.name\n out.writeln(\" %s %s\" % (repr(node.conan_ref), from_text), Color.BRIGHT_CYAN)\n _recipes(requires)\n out.writeln(\"Packages\", Color.BRIGHT_YELLOW)\n\n def _packages(nodes):\n for node in nodes:\n ref, conanfile = node.conan_ref, node.conanfile\n ref = PackageReference(ref, conanfile.info.package_id())\n out.writeln(\" %s\" % (repr(ref)), Color.BRIGHT_CYAN)\n _packages(requires)\n\n out.writeln(\"\")\n", "path": "conans/client/graph/printer.py"}], "after_files": [{"content": "from conans.client.output import Color\nfrom conans.model.ref import PackageReference\nfrom conans.model.workspace import WORKSPACE_FILE\n\n\ndef print_graph(deps_graph, out):\n all_nodes = []\n ids = set()\n for node in sorted(n for n in deps_graph.nodes if n.conan_ref):\n package_id = PackageReference(node.conan_ref, node.conanfile.info.package_id())\n if package_id not in ids:\n all_nodes.append(node)\n ids.add(package_id)\n requires = [n for n in all_nodes]\n out.writeln(\"Requirements\", Color.BRIGHT_YELLOW)\n\n def _recipes(nodes):\n for node in nodes:\n if node.remote == WORKSPACE_FILE:\n from_text = \"from '%s'\" % WORKSPACE_FILE\n else:\n from_text = \"from local cache\" if not node.remote else \"from '%s'\" % node.remote.name\n out.writeln(\" %s %s\" % (repr(node.conan_ref), from_text), Color.BRIGHT_CYAN)\n _recipes(requires)\n out.writeln(\"Packages\", Color.BRIGHT_YELLOW)\n\n def _packages(nodes):\n for node in nodes:\n ref, conanfile = node.conan_ref, node.conanfile\n ref = PackageReference(ref, conanfile.info.package_id())\n out.writeln(\" %s\" % (repr(ref)), Color.BRIGHT_CYAN)\n _packages(requires)\n\n out.writeln(\"\")\n", "path": "conans/client/graph/printer.py"}]}
| 1,372 | 137 |
gh_patches_debug_25780
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1382
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No colors when hooks are run by a git commit
Hi,
I use pre-commit at home on Linux, which works perfectly. But at work I have a Windows PC. Here I have problems with the colorful output.
When the hooks are run by `tox` calling `pre-commit run` there are colors as usual. When the hooks are run by a `git commit` the colors are missing.
Concretely, I mean the green for 'Passed', red for 'Failed' and yellow/brown for 'Skipped' in the overview.
There is no difference if I run it via git-bash, cmd or powershell. Also, there is no difference if I use the PyCharm built-in terminal or others.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/color.py`
Content:
```
1 import os
2 import sys
3
4 if sys.platform == 'win32': # pragma: no cover (windows)
5 def _enable() -> None:
6 from ctypes import POINTER
7 from ctypes import windll
8 from ctypes import WinError
9 from ctypes import WINFUNCTYPE
10 from ctypes.wintypes import BOOL
11 from ctypes.wintypes import DWORD
12 from ctypes.wintypes import HANDLE
13
14 STD_OUTPUT_HANDLE = -11
15 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
16
17 def bool_errcheck(result, func, args):
18 if not result:
19 raise WinError()
20 return args
21
22 GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(
23 ('GetStdHandle', windll.kernel32), ((1, 'nStdHandle'),),
24 )
25
26 GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(
27 ('GetConsoleMode', windll.kernel32),
28 ((1, 'hConsoleHandle'), (2, 'lpMode')),
29 )
30 GetConsoleMode.errcheck = bool_errcheck
31
32 SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)(
33 ('SetConsoleMode', windll.kernel32),
34 ((1, 'hConsoleHandle'), (1, 'dwMode')),
35 )
36 SetConsoleMode.errcheck = bool_errcheck
37
38 # As of Windows 10, the Windows console supports (some) ANSI escape
39 # sequences, but it needs to be enabled using `SetConsoleMode` first.
40 #
41 # More info on the escape sequences supported:
42 # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx
43 stdout = GetStdHandle(STD_OUTPUT_HANDLE)
44 flags = GetConsoleMode(stdout)
45 SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
46
47 try:
48 _enable()
49 except OSError:
50 terminal_supports_color = False
51 else:
52 terminal_supports_color = True
53 else: # pragma: win32 no cover
54 terminal_supports_color = True
55
56 RED = '\033[41m'
57 GREEN = '\033[42m'
58 YELLOW = '\033[43;30m'
59 TURQUOISE = '\033[46;30m'
60 SUBTLE = '\033[2m'
61 NORMAL = '\033[m'
62
63
64 def format_color(text: str, color: str, use_color_setting: bool) -> str:
65 """Format text with color.
66
67 Args:
68 text - Text to be formatted with color if `use_color`
69 color - The color start string
70 use_color_setting - Whether or not to color
71 """
72 if use_color_setting:
73 return f'{color}{text}{NORMAL}'
74 else:
75 return text
76
77
78 COLOR_CHOICES = ('auto', 'always', 'never')
79
80
81 def use_color(setting: str) -> bool:
82 """Choose whether to use color based on the command argument.
83
84 Args:
85 setting - Either `auto`, `always`, or `never`
86 """
87 if setting not in COLOR_CHOICES:
88 raise ValueError(setting)
89
90 return (
91 setting == 'always' or (
92 setting == 'auto' and
93 sys.stdout.isatty() and
94 terminal_supports_color and
95 os.getenv('TERM') != 'dumb'
96 )
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/color.py b/pre_commit/color.py
--- a/pre_commit/color.py
+++ b/pre_commit/color.py
@@ -11,7 +11,7 @@
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
- STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
def bool_errcheck(result, func, args):
@@ -40,9 +40,9 @@
#
# More info on the escape sequences supported:
# https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx
- stdout = GetStdHandle(STD_OUTPUT_HANDLE)
- flags = GetConsoleMode(stdout)
- SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ stderr = GetStdHandle(STD_ERROR_HANDLE)
+ flags = GetConsoleMode(stderr)
+ SetConsoleMode(stderr, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
try:
_enable()
@@ -90,7 +90,7 @@
return (
setting == 'always' or (
setting == 'auto' and
- sys.stdout.isatty() and
+ sys.stderr.isatty() and
terminal_supports_color and
os.getenv('TERM') != 'dumb'
)
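
The patch keys both the Windows VT-mode setup and the `isatty()` check off stderr rather than stdout — the idea, hedged, being that color should be decided by a stream that is still attached to the console when git invokes the hook, even if stdout is redirected. A minimal, generic sketch (not pre-commit's implementation) of deciding color per stream:

```python
# Minimal sketch (illustrative only): decide whether to colorize per output
# stream, so a hook whose stdout is piped can still colorize stderr.
import os
import sys


def stream_supports_color(stream) -> bool:
    return (
        hasattr(stream, "isatty")
        and stream.isatty()
        and os.environ.get("TERM") != "dumb"
    )


if __name__ == "__main__":
    for name, stream in (("stdout", sys.stdout), ("stderr", sys.stderr)):
        print(f"{name}: color {'on' if stream_supports_color(stream) else 'off'}")
```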
|
{"golden_diff": "diff --git a/pre_commit/color.py b/pre_commit/color.py\n--- a/pre_commit/color.py\n+++ b/pre_commit/color.py\n@@ -11,7 +11,7 @@\n from ctypes.wintypes import DWORD\n from ctypes.wintypes import HANDLE\n \n- STD_OUTPUT_HANDLE = -11\n+ STD_ERROR_HANDLE = -12\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4\n \n def bool_errcheck(result, func, args):\n@@ -40,9 +40,9 @@\n #\n # More info on the escape sequences supported:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx\n- stdout = GetStdHandle(STD_OUTPUT_HANDLE)\n- flags = GetConsoleMode(stdout)\n- SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n+ stderr = GetStdHandle(STD_ERROR_HANDLE)\n+ flags = GetConsoleMode(stderr)\n+ SetConsoleMode(stderr, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n \n try:\n _enable()\n@@ -90,7 +90,7 @@\n return (\n setting == 'always' or (\n setting == 'auto' and\n- sys.stdout.isatty() and\n+ sys.stderr.isatty() and\n terminal_supports_color and\n os.getenv('TERM') != 'dumb'\n )\n", "issue": "No colors when hooks are run by a git commit\nHi,\r\nI use pre-commit at home on linux which works perfect. But at work I have a windows pc. Here I have problems with the colorfull output. \r\n\r\nWhen the hooks are run by `tox` calling `pre-commit run` there are colors as usual. When the hooks are run by a `git commit` the colors are missing.\r\n\r\nConcrete I mean the green for 'Passed', red for 'Failed ' and yellow/brown for 'Skipped' in the overview.\r\n\r\nThere is no difference if I run it via git-bash, cmd or powershell. Also there is no difference if I use the pycharm buildin terminal or others.\n", "before_files": [{"content": "import os\nimport sys\n\nif sys.platform == 'win32': # pragma: no cover (windows)\n def _enable() -> None:\n from ctypes import POINTER\n from ctypes import windll\n from ctypes import WinError\n from ctypes import WINFUNCTYPE\n from ctypes.wintypes import BOOL\n from ctypes.wintypes import DWORD\n from ctypes.wintypes import HANDLE\n\n STD_OUTPUT_HANDLE = -11\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4\n\n def bool_errcheck(result, func, args):\n if not result:\n raise WinError()\n return args\n\n GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(\n ('GetStdHandle', windll.kernel32), ((1, 'nStdHandle'),),\n )\n\n GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(\n ('GetConsoleMode', windll.kernel32),\n ((1, 'hConsoleHandle'), (2, 'lpMode')),\n )\n GetConsoleMode.errcheck = bool_errcheck\n\n SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)(\n ('SetConsoleMode', windll.kernel32),\n ((1, 'hConsoleHandle'), (1, 'dwMode')),\n )\n SetConsoleMode.errcheck = bool_errcheck\n\n # As of Windows 10, the Windows console supports (some) ANSI escape\n # sequences, but it needs to be enabled using `SetConsoleMode` first.\n #\n # More info on the escape sequences supported:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx\n stdout = GetStdHandle(STD_OUTPUT_HANDLE)\n flags = GetConsoleMode(stdout)\n SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n\n try:\n _enable()\n except OSError:\n terminal_supports_color = False\n else:\n terminal_supports_color = True\nelse: # pragma: win32 no cover\n terminal_supports_color = True\n\nRED = '\\033[41m'\nGREEN = '\\033[42m'\nYELLOW = '\\033[43;30m'\nTURQUOISE = '\\033[46;30m'\nSUBTLE = '\\033[2m'\nNORMAL = '\\033[m'\n\n\ndef format_color(text: str, color: str, use_color_setting: bool) -> str:\n \"\"\"Format text with color.\n\n Args:\n text - Text to be 
formatted with color if `use_color`\n color - The color start string\n use_color_setting - Whether or not to color\n \"\"\"\n if use_color_setting:\n return f'{color}{text}{NORMAL}'\n else:\n return text\n\n\nCOLOR_CHOICES = ('auto', 'always', 'never')\n\n\ndef use_color(setting: str) -> bool:\n \"\"\"Choose whether to use color based on the command argument.\n\n Args:\n setting - Either `auto`, `always`, or `never`\n \"\"\"\n if setting not in COLOR_CHOICES:\n raise ValueError(setting)\n\n return (\n setting == 'always' or (\n setting == 'auto' and\n sys.stdout.isatty() and\n terminal_supports_color and\n os.getenv('TERM') != 'dumb'\n )\n )\n", "path": "pre_commit/color.py"}], "after_files": [{"content": "import os\nimport sys\n\nif sys.platform == 'win32': # pragma: no cover (windows)\n def _enable() -> None:\n from ctypes import POINTER\n from ctypes import windll\n from ctypes import WinError\n from ctypes import WINFUNCTYPE\n from ctypes.wintypes import BOOL\n from ctypes.wintypes import DWORD\n from ctypes.wintypes import HANDLE\n\n STD_ERROR_HANDLE = -12\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4\n\n def bool_errcheck(result, func, args):\n if not result:\n raise WinError()\n return args\n\n GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(\n ('GetStdHandle', windll.kernel32), ((1, 'nStdHandle'),),\n )\n\n GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(\n ('GetConsoleMode', windll.kernel32),\n ((1, 'hConsoleHandle'), (2, 'lpMode')),\n )\n GetConsoleMode.errcheck = bool_errcheck\n\n SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)(\n ('SetConsoleMode', windll.kernel32),\n ((1, 'hConsoleHandle'), (1, 'dwMode')),\n )\n SetConsoleMode.errcheck = bool_errcheck\n\n # As of Windows 10, the Windows console supports (some) ANSI escape\n # sequences, but it needs to be enabled using `SetConsoleMode` first.\n #\n # More info on the escape sequences supported:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx\n stderr = GetStdHandle(STD_ERROR_HANDLE)\n flags = GetConsoleMode(stderr)\n SetConsoleMode(stderr, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n\n try:\n _enable()\n except OSError:\n terminal_supports_color = False\n else:\n terminal_supports_color = True\nelse: # pragma: win32 no cover\n terminal_supports_color = True\n\nRED = '\\033[41m'\nGREEN = '\\033[42m'\nYELLOW = '\\033[43;30m'\nTURQUOISE = '\\033[46;30m'\nSUBTLE = '\\033[2m'\nNORMAL = '\\033[m'\n\n\ndef format_color(text: str, color: str, use_color_setting: bool) -> str:\n \"\"\"Format text with color.\n\n Args:\n text - Text to be formatted with color if `use_color`\n color - The color start string\n use_color_setting - Whether or not to color\n \"\"\"\n if use_color_setting:\n return f'{color}{text}{NORMAL}'\n else:\n return text\n\n\nCOLOR_CHOICES = ('auto', 'always', 'never')\n\n\ndef use_color(setting: str) -> bool:\n \"\"\"Choose whether to use color based on the command argument.\n\n Args:\n setting - Either `auto`, `always`, or `never`\n \"\"\"\n if setting not in COLOR_CHOICES:\n raise ValueError(setting)\n\n return (\n setting == 'always' or (\n setting == 'auto' and\n sys.stderr.isatty() and\n terminal_supports_color and\n os.getenv('TERM') != 'dumb'\n )\n )\n", "path": "pre_commit/color.py"}]}
| 1,331 | 304 |
gh_patches_debug_1946
|
rasdani/github-patches
|
git_diff
|
tobymao__sqlglot-2365
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support '' to escape single quote character in a string in Redshift dialect
**Fully reproducible code snippet**
```python
import sqlglot
sql_code = """
CREATE TABLE IF NOT EXISTS myschema.mytable (
mycolumn bigint,
)
DISTKEY (mycolumn)
SORTKEY (mycolumn)
;
COMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = \\'working\\'';
COMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = ''not working''';
"""
expressions = sqlglot.parse(sql_code, read="redshift")
```
Error:
```console
Traceback (most recent call last):
...
raise error
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 9, Col: 75.
column IS 'my example = \'working\'';
COMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = ''not working''';
```
**Official Documentation**
I couldn't find the right documentation on AWS that explains this, but I ran the query on Redshift and it works perfectly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/redshift.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, transforms
6 from sqlglot.dialects.dialect import (
7 concat_to_dpipe_sql,
8 concat_ws_to_dpipe_sql,
9 rename_func,
10 ts_or_ds_to_date_sql,
11 )
12 from sqlglot.dialects.postgres import Postgres
13 from sqlglot.helper import seq_get
14 from sqlglot.tokens import TokenType
15
16
17 def _json_sql(self: Redshift.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:
18 return f'{self.sql(expression, "this")}."{expression.expression.name}"'
19
20
21 def _parse_date_add(args: t.List) -> exp.DateAdd:
22 return exp.DateAdd(
23 this=exp.TsOrDsToDate(this=seq_get(args, 2)),
24 expression=seq_get(args, 1),
25 unit=seq_get(args, 0),
26 )
27
28
29 class Redshift(Postgres):
30 # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
31 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
32
33 SUPPORTS_USER_DEFINED_TYPES = False
34
35 TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
36 TIME_MAPPING = {
37 **Postgres.TIME_MAPPING,
38 "MON": "%b",
39 "HH": "%H",
40 }
41
42 class Parser(Postgres.Parser):
43 FUNCTIONS = {
44 **Postgres.Parser.FUNCTIONS,
45 "ADD_MONTHS": lambda args: exp.DateAdd(
46 this=exp.TsOrDsToDate(this=seq_get(args, 0)),
47 expression=seq_get(args, 1),
48 unit=exp.var("month"),
49 ),
50 "DATEADD": _parse_date_add,
51 "DATE_ADD": _parse_date_add,
52 "DATEDIFF": lambda args: exp.DateDiff(
53 this=exp.TsOrDsToDate(this=seq_get(args, 2)),
54 expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
55 unit=seq_get(args, 0),
56 ),
57 "STRTOL": exp.FromBase.from_arg_list,
58 }
59
60 def _parse_types(
61 self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
62 ) -> t.Optional[exp.Expression]:
63 this = super()._parse_types(
64 check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
65 )
66
67 if (
68 isinstance(this, exp.DataType)
69 and this.is_type("varchar")
70 and this.expressions
71 and this.expressions[0].this == exp.column("MAX")
72 ):
73 this.set("expressions", [exp.var("MAX")])
74
75 return this
76
77 def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
78 to = self._parse_types()
79 self._match(TokenType.COMMA)
80 this = self._parse_bitwise()
81 return self.expression(exp.TryCast, this=this, to=to)
82
83 class Tokenizer(Postgres.Tokenizer):
84 BIT_STRINGS = []
85 HEX_STRINGS = []
86 STRING_ESCAPES = ["\\"]
87
88 KEYWORDS = {
89 **Postgres.Tokenizer.KEYWORDS,
90 "HLLSKETCH": TokenType.HLLSKETCH,
91 "SUPER": TokenType.SUPER,
92 "SYSDATE": TokenType.CURRENT_TIMESTAMP,
93 "TOP": TokenType.TOP,
94 "UNLOAD": TokenType.COMMAND,
95 "VARBYTE": TokenType.VARBINARY,
96 }
97
98 # Redshift allows # to appear as a table identifier prefix
99 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
100 SINGLE_TOKENS.pop("#")
101
102 class Generator(Postgres.Generator):
103 LOCKING_READS_SUPPORTED = False
104 RENAME_TABLE_WITH_DB = False
105 QUERY_HINTS = False
106 VALUES_AS_TABLE = False
107 TZ_TO_WITH_TIME_ZONE = True
108 NVL2_SUPPORTED = True
109
110 TYPE_MAPPING = {
111 **Postgres.Generator.TYPE_MAPPING,
112 exp.DataType.Type.BINARY: "VARBYTE",
113 exp.DataType.Type.INT: "INTEGER",
114 exp.DataType.Type.TIMETZ: "TIME",
115 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
116 exp.DataType.Type.VARBINARY: "VARBYTE",
117 }
118
119 PROPERTIES_LOCATION = {
120 **Postgres.Generator.PROPERTIES_LOCATION,
121 exp.LikeProperty: exp.Properties.Location.POST_WITH,
122 }
123
124 TRANSFORMS = {
125 **Postgres.Generator.TRANSFORMS,
126 exp.Concat: concat_to_dpipe_sql,
127 exp.ConcatWs: concat_ws_to_dpipe_sql,
128 exp.CurrentTimestamp: lambda self, e: "SYSDATE",
129 exp.DateAdd: lambda self, e: self.func(
130 "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
131 ),
132 exp.DateDiff: lambda self, e: self.func(
133 "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
134 ),
135 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
136 exp.DistStyleProperty: lambda self, e: self.naked_property(e),
137 exp.FromBase: rename_func("STRTOL"),
138 exp.JSONExtract: _json_sql,
139 exp.JSONExtractScalar: _json_sql,
140 exp.SafeConcat: concat_to_dpipe_sql,
141 exp.Select: transforms.preprocess(
142 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
143 ),
144 exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
145 exp.TsOrDsToDate: ts_or_ds_to_date_sql("redshift"),
146 }
147
148 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
149 TRANSFORMS.pop(exp.Pivot)
150
151 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
152 TRANSFORMS.pop(exp.Pow)
153
154 # Redshift supports ANY_VALUE(..)
155 TRANSFORMS.pop(exp.AnyValue)
156
157 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
158
159 def with_properties(self, properties: exp.Properties) -> str:
160 """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
161 return self.properties(properties, prefix=" ", suffix="")
162
163 def datatype_sql(self, expression: exp.DataType) -> str:
164 """
165 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
166 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
167 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
168 `TEXT` to `VARCHAR`.
169 """
170 if expression.is_type("text"):
171 expression = expression.copy()
172 expression.set("this", exp.DataType.Type.VARCHAR)
173 precision = expression.args.get("expressions")
174
175 if not precision:
176 expression.append("expressions", exp.var("MAX"))
177
178 return super().datatype_sql(expression)
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -83,7 +83,7 @@
class Tokenizer(Postgres.Tokenizer):
BIT_STRINGS = []
HEX_STRINGS = []
- STRING_ESCAPES = ["\\"]
+ STRING_ESCAPES = ["\\", "'"]
KEYWORDS = {
**Postgres.Tokenizer.KEYWORDS,
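
With `"'"` added to `STRING_ESCAPES`, the Redshift tokenizer treats a doubled single quote inside a literal as an escaped quote, so the statement that failed in the issue should now parse. A quick check using the same API as the repro (assumes a sqlglot build that includes this patch):

```python
import sqlglot

sql = "COMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = ''not working'''"
[expression] = sqlglot.parse(sql, read="redshift")
print(expression.sql(dialect="redshift"))
```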
|
{"golden_diff": "diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py\n--- a/sqlglot/dialects/redshift.py\n+++ b/sqlglot/dialects/redshift.py\n@@ -83,7 +83,7 @@\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n- STRING_ESCAPES = [\"\\\\\"]\n+ STRING_ESCAPES = [\"\\\\\", \"'\"]\n \n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n", "issue": "Support '' to escape single quote character in a string in Redshift dialect\n**Fully reproducible code snippet**\r\n\r\n```python\r\nimport sqlglot\r\n\r\nsql_code = \"\"\"\r\nCREATE TABLE IF NOT EXISTS myschema.mytable (\r\n mycolumn bigint,\r\n)\r\nDISTKEY (mycolumn)\r\nSORTKEY (mycolumn)\r\n ;\r\nCOMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = \\\\'working\\\\'';\r\nCOMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = ''not working''';\r\n\"\"\"\r\nexpressions = sqlglot.parse(sql_code, read=\"redshift\")\r\n```\r\n\r\nError: \r\n\r\n```console\r\nTraceback (most recent call last):\r\n ...\r\n raise error\r\nsqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 9, Col: 75.\r\n column IS 'my example = \\'working\\'';\r\nCOMMENT ON COLUMN myschema.mytable.mycolumn IS 'my example = ''not working''';\r\n```\r\n\r\n**Official Documentation**\r\n\r\nI couldn't find the right documentation on AWS that explains this, but I ran the query on Redshift and it works perfectly.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, transforms\nfrom sqlglot.dialects.dialect import (\n concat_to_dpipe_sql,\n concat_ws_to_dpipe_sql,\n rename_func,\n ts_or_ds_to_date_sql,\n)\nfrom sqlglot.dialects.postgres import Postgres\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _json_sql(self: Redshift.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:\n return f'{self.sql(expression, \"this\")}.\"{expression.expression.name}\"'\n\n\ndef _parse_date_add(args: t.List) -> exp.DateAdd:\n return exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n )\n\n\nclass Redshift(Postgres):\n # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = None\n\n SUPPORTS_USER_DEFINED_TYPES = False\n\n TIME_FORMAT = \"'YYYY-MM-DD HH:MI:SS'\"\n TIME_MAPPING = {\n **Postgres.TIME_MAPPING,\n \"MON\": \"%b\",\n \"HH\": \"%H\",\n }\n\n class Parser(Postgres.Parser):\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"ADD_MONTHS\": lambda args: exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n expression=seq_get(args, 1),\n unit=exp.var(\"month\"),\n ),\n \"DATEADD\": _parse_date_add,\n \"DATE_ADD\": _parse_date_add,\n \"DATEDIFF\": lambda args: exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=exp.TsOrDsToDate(this=seq_get(args, 1)),\n unit=seq_get(args, 0),\n ),\n \"STRTOL\": exp.FromBase.from_arg_list,\n }\n\n def _parse_types(\n self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_types(\n check_func=check_func, schema=schema, allow_identifiers=allow_identifiers\n )\n\n if (\n isinstance(this, exp.DataType)\n and this.is_type(\"varchar\")\n and this.expressions\n and this.expressions[0].this == exp.column(\"MAX\")\n ):\n this.set(\"expressions\", [exp.var(\"MAX\")])\n\n return this\n\n def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:\n to = 
self._parse_types()\n self._match(TokenType.COMMA)\n this = self._parse_bitwise()\n return self.expression(exp.TryCast, this=this, to=to)\n\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n STRING_ESCAPES = [\"\\\\\"]\n\n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n \"HLLSKETCH\": TokenType.HLLSKETCH,\n \"SUPER\": TokenType.SUPER,\n \"SYSDATE\": TokenType.CURRENT_TIMESTAMP,\n \"TOP\": TokenType.TOP,\n \"UNLOAD\": TokenType.COMMAND,\n \"VARBYTE\": TokenType.VARBINARY,\n }\n\n # Redshift allows # to appear as a table identifier prefix\n SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()\n SINGLE_TOKENS.pop(\"#\")\n\n class Generator(Postgres.Generator):\n LOCKING_READS_SUPPORTED = False\n RENAME_TABLE_WITH_DB = False\n QUERY_HINTS = False\n VALUES_AS_TABLE = False\n TZ_TO_WITH_TIME_ZONE = True\n NVL2_SUPPORTED = True\n\n TYPE_MAPPING = {\n **Postgres.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"VARBYTE\",\n exp.DataType.Type.INT: \"INTEGER\",\n exp.DataType.Type.TIMETZ: \"TIME\",\n exp.DataType.Type.TIMESTAMPTZ: \"TIMESTAMP\",\n exp.DataType.Type.VARBINARY: \"VARBYTE\",\n }\n\n PROPERTIES_LOCATION = {\n **Postgres.Generator.PROPERTIES_LOCATION,\n exp.LikeProperty: exp.Properties.Location.POST_WITH,\n }\n\n TRANSFORMS = {\n **Postgres.Generator.TRANSFORMS,\n exp.Concat: concat_to_dpipe_sql,\n exp.ConcatWs: concat_ws_to_dpipe_sql,\n exp.CurrentTimestamp: lambda self, e: \"SYSDATE\",\n exp.DateAdd: lambda self, e: self.func(\n \"DATEADD\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DateDiff: lambda self, e: self.func(\n \"DATEDIFF\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n exp.FromBase: rename_func(\"STRTOL\"),\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.SafeConcat: concat_to_dpipe_sql,\n exp.Select: transforms.preprocess(\n [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]\n ),\n exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n exp.TsOrDsToDate: ts_or_ds_to_date_sql(\"redshift\"),\n }\n\n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n TRANSFORMS.pop(exp.Pivot)\n\n # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)\n TRANSFORMS.pop(exp.Pow)\n\n # Redshift supports ANY_VALUE(..)\n TRANSFORMS.pop(exp.AnyValue)\n\n RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, \"snapshot\", \"type\"}\n\n def with_properties(self, properties: exp.Properties) -> str:\n \"\"\"Redshift doesn't have `WITH` as part of their with_properties so we remove it\"\"\"\n return self.properties(properties, prefix=\" \", suffix=\"\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n \"\"\"\n Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean\n VARCHAR of max length which is `VARCHAR(max)` in Redshift. 
Therefore if we get a `TEXT` data type\n without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert\n `TEXT` to `VARCHAR`.\n \"\"\"\n if expression.is_type(\"text\"):\n expression = expression.copy()\n expression.set(\"this\", exp.DataType.Type.VARCHAR)\n precision = expression.args.get(\"expressions\")\n\n if not precision:\n expression.append(\"expressions\", exp.var(\"MAX\"))\n\n return super().datatype_sql(expression)\n", "path": "sqlglot/dialects/redshift.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, transforms\nfrom sqlglot.dialects.dialect import (\n concat_to_dpipe_sql,\n concat_ws_to_dpipe_sql,\n rename_func,\n ts_or_ds_to_date_sql,\n)\nfrom sqlglot.dialects.postgres import Postgres\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _json_sql(self: Redshift.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:\n return f'{self.sql(expression, \"this\")}.\"{expression.expression.name}\"'\n\n\ndef _parse_date_add(args: t.List) -> exp.DateAdd:\n return exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n )\n\n\nclass Redshift(Postgres):\n # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = None\n\n SUPPORTS_USER_DEFINED_TYPES = False\n\n TIME_FORMAT = \"'YYYY-MM-DD HH:MI:SS'\"\n TIME_MAPPING = {\n **Postgres.TIME_MAPPING,\n \"MON\": \"%b\",\n \"HH\": \"%H\",\n }\n\n class Parser(Postgres.Parser):\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"ADD_MONTHS\": lambda args: exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n expression=seq_get(args, 1),\n unit=exp.var(\"month\"),\n ),\n \"DATEADD\": _parse_date_add,\n \"DATE_ADD\": _parse_date_add,\n \"DATEDIFF\": lambda args: exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=exp.TsOrDsToDate(this=seq_get(args, 1)),\n unit=seq_get(args, 0),\n ),\n \"STRTOL\": exp.FromBase.from_arg_list,\n }\n\n def _parse_types(\n self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_types(\n check_func=check_func, schema=schema, allow_identifiers=allow_identifiers\n )\n\n if (\n isinstance(this, exp.DataType)\n and this.is_type(\"varchar\")\n and this.expressions\n and this.expressions[0].this == exp.column(\"MAX\")\n ):\n this.set(\"expressions\", [exp.var(\"MAX\")])\n\n return this\n\n def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:\n to = self._parse_types()\n self._match(TokenType.COMMA)\n this = self._parse_bitwise()\n return self.expression(exp.TryCast, this=this, to=to)\n\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n STRING_ESCAPES = [\"\\\\\", \"'\"]\n\n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n \"HLLSKETCH\": TokenType.HLLSKETCH,\n \"SUPER\": TokenType.SUPER,\n \"SYSDATE\": TokenType.CURRENT_TIMESTAMP,\n \"TOP\": TokenType.TOP,\n \"UNLOAD\": TokenType.COMMAND,\n \"VARBYTE\": TokenType.VARBINARY,\n }\n\n # Redshift allows # to appear as a table identifier prefix\n SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()\n SINGLE_TOKENS.pop(\"#\")\n\n class Generator(Postgres.Generator):\n LOCKING_READS_SUPPORTED = False\n RENAME_TABLE_WITH_DB = False\n QUERY_HINTS = False\n VALUES_AS_TABLE = False\n TZ_TO_WITH_TIME_ZONE = True\n NVL2_SUPPORTED = True\n\n TYPE_MAPPING = {\n 
**Postgres.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"VARBYTE\",\n exp.DataType.Type.INT: \"INTEGER\",\n exp.DataType.Type.TIMETZ: \"TIME\",\n exp.DataType.Type.TIMESTAMPTZ: \"TIMESTAMP\",\n exp.DataType.Type.VARBINARY: \"VARBYTE\",\n }\n\n PROPERTIES_LOCATION = {\n **Postgres.Generator.PROPERTIES_LOCATION,\n exp.LikeProperty: exp.Properties.Location.POST_WITH,\n }\n\n TRANSFORMS = {\n **Postgres.Generator.TRANSFORMS,\n exp.Concat: concat_to_dpipe_sql,\n exp.ConcatWs: concat_ws_to_dpipe_sql,\n exp.CurrentTimestamp: lambda self, e: \"SYSDATE\",\n exp.DateAdd: lambda self, e: self.func(\n \"DATEADD\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DateDiff: lambda self, e: self.func(\n \"DATEDIFF\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n exp.FromBase: rename_func(\"STRTOL\"),\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.SafeConcat: concat_to_dpipe_sql,\n exp.Select: transforms.preprocess(\n [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]\n ),\n exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n exp.TsOrDsToDate: ts_or_ds_to_date_sql(\"redshift\"),\n }\n\n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n TRANSFORMS.pop(exp.Pivot)\n\n # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)\n TRANSFORMS.pop(exp.Pow)\n\n # Redshift supports ANY_VALUE(..)\n TRANSFORMS.pop(exp.AnyValue)\n\n RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, \"snapshot\", \"type\"}\n\n def with_properties(self, properties: exp.Properties) -> str:\n \"\"\"Redshift doesn't have `WITH` as part of their with_properties so we remove it\"\"\"\n return self.properties(properties, prefix=\" \", suffix=\"\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n \"\"\"\n Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean\n VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type\n without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert\n `TEXT` to `VARCHAR`.\n \"\"\"\n if expression.is_type(\"text\"):\n expression = expression.copy()\n expression.set(\"this\", exp.DataType.Type.VARCHAR)\n precision = expression.args.get(\"expressions\")\n\n if not precision:\n expression.append(\"expressions\", exp.var(\"MAX\"))\n\n return super().datatype_sql(expression)\n", "path": "sqlglot/dialects/redshift.py"}]}
| 2,497 | 118 |
gh_patches_debug_12404
|
rasdani/github-patches
|
git_diff
|
dask__distributed-1885
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Numeric environment variable configuration is broken
I recently found that a pipeline was throwing this error:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/distributed/utils.py", line 623, in log_errors
yield
File "/usr/local/lib/python3.6/site-packages/distributed/scheduler.py", line 1591, in remove_worker
if ts.suspicious > self.allowed_failures:
TypeError: '>' not supported between instances of 'int' and 'str'
```
The environment variable `DASK_ALLOWED_FAILURES=20` had been set. In a new terminal I ran the following:
```py
>>> LocalCluster().scheduler.allowed_failures
'20'
```
There appears to be nothing in the code which ensures that the type of a configuration variable conforms to the expected type.
I'd be happy to submit a PR to fix this but I'm not really sure what form it should take. Some ideas:
1. Remove environment variable configuration completely (it doesn't seem to be documented?)
2. Use `pyyaml` to interpret environment variables the same way yaml interprets them
    - (introduces a hard dependency on `pyyaml`)
3. Approximate the above by just automatically casting all env var config variables to ints where possible
4. Make a `get_config` which casts its return value to be the same type as the provided default; replace `config.get` everywhere. e.g.
```py
def get_config(key, default=None):
if default is not None:
return type(default)(config.get(key, default=default))
return config.get(key)
```
5. Change the `config` dict to be an instance of a subclass of dict which has `get()` overridden to behave as suggested above
6. Change `config.py` to use the example `yaml` file to correctly cast all config variables
7. Cast variables correctly at the point of `get` everywhere
- e.g. `ALLOWED_FAILURES = int(config.get('allowed-failures', 3))`
I think I'd probably go with 3, which seems like the lowest impact solution.
Of course, the workaround for now is to just use a yaml configuration file.
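
For illustration, a minimal sketch of idea 3 — coercing `DASK_*` values inside a `load_env_vars`-style helper like the one in `distributed/config.py` shown below. The helper and names here are illustrative assumptions, not the actual implementation:

```py
# Rough sketch of idea 3 (illustrative only): coerce DASK_* environment values
# to int/float/bool where possible, falling back to the raw string.
import os

def coerce(value):
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    if value.lower() in ("true", "false"):
        return value.lower() == "true"
    return value

def load_env_vars(config):
    for name, value in os.environ.items():
        if name.startswith("DASK_"):
            config[name[5:].lower().replace("_", "-")] = coerce(value)

os.environ["DASK_ALLOWED_FAILURES"] = "20"
cfg = {}
load_env_vars(cfg)
print(cfg["allowed-failures"], type(cfg["allowed-failures"]))  # 20 <class 'int'>
```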
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/config.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 from contextlib import contextmanager
4 import logging
5 import logging.config
6 import os
7 import sys
8 import warnings
9
10 from .compatibility import FileExistsError, logging_names
11
12 logger = logging.getLogger(__name__)
13
14 config = {}
15
16
17 def ensure_config_file(source, destination):
18 if not os.path.exists(destination):
19 import shutil
20 if not os.path.exists(os.path.dirname(destination)):
21 try:
22 os.mkdir(os.path.dirname(destination))
23 except FileExistsError:
24 pass
25 # Atomically create destination. Parallel testing discovered
26 # a race condition where a process can be busy creating the
27 # destination while another process reads an empty config file.
28 tmp = '%s.tmp.%d' % (destination, os.getpid())
29 shutil.copy(source, tmp)
30 try:
31 os.rename(tmp, destination)
32 except OSError:
33 os.remove(tmp)
34
35
36 def determine_config_file():
37 path = os.environ.get('DASK_CONFIG')
38 if path:
39 if (os.path.exists(path) and
40 (os.path.isfile(path) or os.path.islink(path))):
41 return path
42 warnings.warn("DASK_CONFIG set to '%s' but file does not exist "
43 "or is not a regular file" % (path,),
44 UserWarning)
45
46 dirname = os.path.dirname(__file__)
47 default_path = os.path.join(dirname, 'config.yaml')
48 path = os.path.join(os.path.expanduser('~'), '.dask', 'config.yaml')
49
50 try:
51 ensure_config_file(default_path, path)
52 except EnvironmentError as e:
53 warnings.warn("Could not write default config file to '%s'. "
54 "Received error %s" % (path, e),
55 UserWarning)
56
57 return path if os.path.exists(path) else default_path
58
59
60 def load_config_file(config, path):
61 with open(path) as f:
62 text = f.read()
63 config.update(yaml.load(text) or {})
64
65
66 def load_env_vars(config):
67 for name, value in os.environ.items():
68 if name.startswith('DASK_'):
69 varname = name[5:].lower().replace('_', '-')
70 config[varname] = value
71
72
73 def _initialize_logging_old_style(config):
74 """
75 Initialize logging using the "old-style" configuration scheme, e.g.:
76 {
77 'logging': {
78 'distributed': 'info',
79 'tornado': 'critical',
80 'tornado.application': 'error',
81 }
82 }
83 """
84 loggers = { # default values
85 'distributed': 'info',
86 'distributed.client': 'warning',
87 'bokeh': 'critical',
88 'tornado': 'critical',
89 'tornado.application': 'error',
90 }
91 loggers.update(config.get('logging', {}))
92
93 handler = logging.StreamHandler(sys.stderr)
94 handler.setFormatter(logging.Formatter(log_format))
95 for name, level in loggers.items():
96 if isinstance(level, str):
97 level = logging_names[level.upper()]
98 logger = logging.getLogger(name)
99 logger.setLevel(level)
100 logger.handlers[:] = []
101 logger.addHandler(handler)
102 logger.propagate = False
103
104
105 def _initialize_logging_new_style(config):
106 """
107 Initialize logging using logging's "Configuration dictionary schema".
108 (ref.: https://docs.python.org/2/library/logging.config.html#logging-config-dictschema)
109 """
110 logging.config.dictConfig(config['logging'])
111
112
113 def _initialize_logging_file_config(config):
114 """
115 Initialize logging using logging's "Configuration file format".
116 (ref.: https://docs.python.org/2/library/logging.config.html#configuration-file-format)
117 """
118 logging.config.fileConfig(config['logging-file-config'], disable_existing_loggers=False)
119
120
121 def initialize_logging(config):
122 if 'logging-file-config' in config:
123 if 'logging' in config:
124 raise RuntimeError("Config options 'logging-file-config' and 'logging' are mutually exclusive.")
125 _initialize_logging_file_config(config)
126 else:
127 log_config = config.get('logging', {})
128 if 'version' in log_config:
129 # logging module mandates version to be an int
130 log_config['version'] = int(log_config['version'])
131 _initialize_logging_new_style(config)
132 else:
133 _initialize_logging_old_style(config)
134
135
136 @contextmanager
137 def set_config(arg=None, **kwargs):
138 if arg and not kwargs:
139 kwargs = arg
140 old = {}
141 for key in kwargs:
142 if key in config:
143 old[key] = config[key]
144
145 for key, value in kwargs.items():
146 config[key] = value
147
148 try:
149 yield
150 finally:
151 for key in kwargs:
152 if key in old:
153 config[key] = old[key]
154 else:
155 del config[key]
156
157
158 try:
159 import yaml
160 except ImportError:
161 pass
162 else:
163 path = determine_config_file()
164 load_config_file(config, path)
165
166 load_env_vars(config)
167
168 log_format = config.get('log-format', '%(name)s - %(levelname)s - %(message)s')
169
170 initialize_logging(config)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/config.py b/distributed/config.py
--- a/distributed/config.py
+++ b/distributed/config.py
@@ -67,7 +67,24 @@
for name, value in os.environ.items():
if name.startswith('DASK_'):
varname = name[5:].lower().replace('_', '-')
- config[varname] = value
+ config[varname] = _parse_env_value(value)
+
+
+def _parse_env_value(value):
+ """ Convert a string to an integer, float or boolean (in that order) if possible. """
+ bools = {
+ 'true': True,
+ 'false': False
+ }
+ try:
+ return int(value)
+ except ValueError:
+ pass
+ try:
+ return float(value)
+ except ValueError:
+ pass
+ return bools.get(value.lower(), value)
def _initialize_logging_old_style(config):
|
{"golden_diff": "diff --git a/distributed/config.py b/distributed/config.py\n--- a/distributed/config.py\n+++ b/distributed/config.py\n@@ -67,7 +67,24 @@\n for name, value in os.environ.items():\n if name.startswith('DASK_'):\n varname = name[5:].lower().replace('_', '-')\n- config[varname] = value\n+ config[varname] = _parse_env_value(value)\n+\n+\n+def _parse_env_value(value):\n+ \"\"\" Convert a string to an integer, float or boolean (in that order) if possible. \"\"\"\n+ bools = {\n+ 'true': True,\n+ 'false': False\n+ }\n+ try:\n+ return int(value)\n+ except ValueError:\n+ pass\n+ try:\n+ return float(value)\n+ except ValueError:\n+ pass\n+ return bools.get(value.lower(), value)\n \n \n def _initialize_logging_old_style(config):\n", "issue": "Numeric environment variable configuration is broken\nI recently found that a pipeline was throwing this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/distributed/utils.py\", line 623, in log_errors\r\n yield\r\n File \"/usr/local/lib/python3.6/site-packages/distributed/scheduler.py\", line 1591, in remove_worker\r\n if ts.suspicious > self.allowed_failures:\r\nTypeError: '>' not supported between instances of 'int' and 'str'\r\n```\r\nThe environment variable `DASK_ALLOWED_FAILURES=20` had been set. In a new terminal I ran the following:\r\n```pya\r\n>>> LocalCluster().scheduler.allowed_failures\r\n'20'\r\n```\r\nThere appears to be nothing in the code which ensures that the type of a configuration variable conforms to the expected type.\r\n\r\nI'd be happy to submit a PR to fix this but I'm not really sure what form it should take. Some ideas:\r\n1. Remove environment variable configuration completely (it doesn't seem to be documented?)\r\n2. Use `pyyaml` to interpret environment variables the same way yaml interprets them\r\n - (introduces are hard dependency on `pyyaml`)\r\n3. Approximate the above by just automatically casting all env var config variables to ints where possible\r\n4. Make a `get_config` which casts its return value to be the same type as the provided default; replace `config.get` everywhere. e.g.\r\n```py\r\ndef get_config(key, default=None):\r\n if default is not None:\r\n return type(default)(config.get(key, default=default))\r\n return config.get(key)\r\n```\r\n5. Change the `config` dict to be an instance of a subclass of dict which has `get()` overridden to behave as suggested above\r\n6. Change `config.py` to use the example `yaml` file to correctly cast all config variables\r\n7. Cast variables correctly at the point of `get` everywhere\r\n - e.g. `ALLOWED_FAILURES = int(config.get('allowed-failures', 3))`\r\n\r\nI think I'd probably go with 3, which seems like the lowest impact solution.\r\n\r\nOf course, the workaround for now is to just use a yaml configuration file.\r\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom contextlib import contextmanager\nimport logging\nimport logging.config\nimport os\nimport sys\nimport warnings\n\nfrom .compatibility import FileExistsError, logging_names\n\nlogger = logging.getLogger(__name__)\n\nconfig = {}\n\n\ndef ensure_config_file(source, destination):\n if not os.path.exists(destination):\n import shutil\n if not os.path.exists(os.path.dirname(destination)):\n try:\n os.mkdir(os.path.dirname(destination))\n except FileExistsError:\n pass\n # Atomically create destination. 
Parallel testing discovered\n # a race condition where a process can be busy creating the\n # destination while another process reads an empty config file.\n tmp = '%s.tmp.%d' % (destination, os.getpid())\n shutil.copy(source, tmp)\n try:\n os.rename(tmp, destination)\n except OSError:\n os.remove(tmp)\n\n\ndef determine_config_file():\n path = os.environ.get('DASK_CONFIG')\n if path:\n if (os.path.exists(path) and\n (os.path.isfile(path) or os.path.islink(path))):\n return path\n warnings.warn(\"DASK_CONFIG set to '%s' but file does not exist \"\n \"or is not a regular file\" % (path,),\n UserWarning)\n\n dirname = os.path.dirname(__file__)\n default_path = os.path.join(dirname, 'config.yaml')\n path = os.path.join(os.path.expanduser('~'), '.dask', 'config.yaml')\n\n try:\n ensure_config_file(default_path, path)\n except EnvironmentError as e:\n warnings.warn(\"Could not write default config file to '%s'. \"\n \"Received error %s\" % (path, e),\n UserWarning)\n\n return path if os.path.exists(path) else default_path\n\n\ndef load_config_file(config, path):\n with open(path) as f:\n text = f.read()\n config.update(yaml.load(text) or {})\n\n\ndef load_env_vars(config):\n for name, value in os.environ.items():\n if name.startswith('DASK_'):\n varname = name[5:].lower().replace('_', '-')\n config[varname] = value\n\n\ndef _initialize_logging_old_style(config):\n \"\"\"\n Initialize logging using the \"old-style\" configuration scheme, e.g.:\n {\n 'logging': {\n 'distributed': 'info',\n 'tornado': 'critical',\n 'tornado.application': 'error',\n }\n }\n \"\"\"\n loggers = { # default values\n 'distributed': 'info',\n 'distributed.client': 'warning',\n 'bokeh': 'critical',\n 'tornado': 'critical',\n 'tornado.application': 'error',\n }\n loggers.update(config.get('logging', {}))\n\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(logging.Formatter(log_format))\n for name, level in loggers.items():\n if isinstance(level, str):\n level = logging_names[level.upper()]\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.handlers[:] = []\n logger.addHandler(handler)\n logger.propagate = False\n\n\ndef _initialize_logging_new_style(config):\n \"\"\"\n Initialize logging using logging's \"Configuration dictionary schema\".\n (ref.: https://docs.python.org/2/library/logging.config.html#logging-config-dictschema)\n \"\"\"\n logging.config.dictConfig(config['logging'])\n\n\ndef _initialize_logging_file_config(config):\n \"\"\"\n Initialize logging using logging's \"Configuration file format\".\n (ref.: https://docs.python.org/2/library/logging.config.html#configuration-file-format)\n \"\"\"\n logging.config.fileConfig(config['logging-file-config'], disable_existing_loggers=False)\n\n\ndef initialize_logging(config):\n if 'logging-file-config' in config:\n if 'logging' in config:\n raise RuntimeError(\"Config options 'logging-file-config' and 'logging' are mutually exclusive.\")\n _initialize_logging_file_config(config)\n else:\n log_config = config.get('logging', {})\n if 'version' in log_config:\n # logging module mandates version to be an int\n log_config['version'] = int(log_config['version'])\n _initialize_logging_new_style(config)\n else:\n _initialize_logging_old_style(config)\n\n\n@contextmanager\ndef set_config(arg=None, **kwargs):\n if arg and not kwargs:\n kwargs = arg\n old = {}\n for key in kwargs:\n if key in config:\n old[key] = config[key]\n\n for key, value in kwargs.items():\n config[key] = value\n\n try:\n yield\n finally:\n for key in kwargs:\n if key 
in old:\n config[key] = old[key]\n else:\n del config[key]\n\n\ntry:\n import yaml\nexcept ImportError:\n pass\nelse:\n path = determine_config_file()\n load_config_file(config, path)\n\nload_env_vars(config)\n\nlog_format = config.get('log-format', '%(name)s - %(levelname)s - %(message)s')\n\ninitialize_logging(config)\n", "path": "distributed/config.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom contextlib import contextmanager\nimport logging\nimport logging.config\nimport os\nimport sys\nimport warnings\n\nfrom .compatibility import FileExistsError, logging_names\n\nlogger = logging.getLogger(__name__)\n\nconfig = {}\n\n\ndef ensure_config_file(source, destination):\n if not os.path.exists(destination):\n import shutil\n if not os.path.exists(os.path.dirname(destination)):\n try:\n os.mkdir(os.path.dirname(destination))\n except FileExistsError:\n pass\n # Atomically create destination. Parallel testing discovered\n # a race condition where a process can be busy creating the\n # destination while another process reads an empty config file.\n tmp = '%s.tmp.%d' % (destination, os.getpid())\n shutil.copy(source, tmp)\n try:\n os.rename(tmp, destination)\n except OSError:\n os.remove(tmp)\n\n\ndef determine_config_file():\n path = os.environ.get('DASK_CONFIG')\n if path:\n if (os.path.exists(path) and\n (os.path.isfile(path) or os.path.islink(path))):\n return path\n warnings.warn(\"DASK_CONFIG set to '%s' but file does not exist \"\n \"or is not a regular file\" % (path,),\n UserWarning)\n\n dirname = os.path.dirname(__file__)\n default_path = os.path.join(dirname, 'config.yaml')\n path = os.path.join(os.path.expanduser('~'), '.dask', 'config.yaml')\n\n try:\n ensure_config_file(default_path, path)\n except EnvironmentError as e:\n warnings.warn(\"Could not write default config file to '%s'. \"\n \"Received error %s\" % (path, e),\n UserWarning)\n\n return path if os.path.exists(path) else default_path\n\n\ndef load_config_file(config, path):\n with open(path) as f:\n text = f.read()\n config.update(yaml.load(text) or {})\n\n\ndef load_env_vars(config):\n for name, value in os.environ.items():\n if name.startswith('DASK_'):\n varname = name[5:].lower().replace('_', '-')\n config[varname] = _parse_env_value(value)\n\n\ndef _parse_env_value(value):\n \"\"\" Convert a string to an integer, float or boolean (in that order) if possible. 
\"\"\"\n bools = {\n 'true': True,\n 'false': False\n }\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n return bools.get(value.lower(), value)\n\n\ndef _initialize_logging_old_style(config):\n \"\"\"\n Initialize logging using the \"old-style\" configuration scheme, e.g.:\n {\n 'logging': {\n 'distributed': 'info',\n 'tornado': 'critical',\n 'tornado.application': 'error',\n }\n }\n \"\"\"\n loggers = { # default values\n 'distributed': 'info',\n 'distributed.client': 'warning',\n 'bokeh': 'critical',\n 'tornado': 'critical',\n 'tornado.application': 'error',\n }\n loggers.update(config.get('logging', {}))\n\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(logging.Formatter(log_format))\n for name, level in loggers.items():\n if isinstance(level, str):\n level = logging_names[level.upper()]\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.handlers[:] = []\n logger.addHandler(handler)\n logger.propagate = False\n\n\ndef _initialize_logging_new_style(config):\n \"\"\"\n Initialize logging using logging's \"Configuration dictionary schema\".\n (ref.: https://docs.python.org/2/library/logging.config.html#logging-config-dictschema)\n \"\"\"\n logging.config.dictConfig(config['logging'])\n\n\ndef _initialize_logging_file_config(config):\n \"\"\"\n Initialize logging using logging's \"Configuration file format\".\n (ref.: https://docs.python.org/2/library/logging.config.html#configuration-file-format)\n \"\"\"\n logging.config.fileConfig(config['logging-file-config'], disable_existing_loggers=False)\n\n\ndef initialize_logging(config):\n if 'logging-file-config' in config:\n if 'logging' in config:\n raise RuntimeError(\"Config options 'logging-file-config' and 'logging' are mutually exclusive.\")\n _initialize_logging_file_config(config)\n else:\n log_config = config.get('logging', {})\n if 'version' in log_config:\n # logging module mandates version to be an int\n log_config['version'] = int(log_config['version'])\n _initialize_logging_new_style(config)\n else:\n _initialize_logging_old_style(config)\n\n\n@contextmanager\ndef set_config(arg=None, **kwargs):\n if arg and not kwargs:\n kwargs = arg\n old = {}\n for key in kwargs:\n if key in config:\n old[key] = config[key]\n\n for key, value in kwargs.items():\n config[key] = value\n\n try:\n yield\n finally:\n for key in kwargs:\n if key in old:\n config[key] = old[key]\n else:\n del config[key]\n\n\ntry:\n import yaml\nexcept ImportError:\n pass\nelse:\n path = determine_config_file()\n load_config_file(config, path)\n\nload_env_vars(config)\n\nlog_format = config.get('log-format', '%(name)s - %(levelname)s - %(message)s')\n\ninitialize_logging(config)\n", "path": "distributed/config.py"}]}
| 2,220 | 208 |
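Illustrative aside (not part of the dataset row above): a minimal, self-contained sketch of the behaviour the golden diff introduces. The `parse_env_value` helper below mirrors the patched `_parse_env_value` from `distributed/config.py`, and the surrounding loop re-implements the `DASK_*` loading logic locally so the casting can be checked in isolation; the names here belong to the sketch, not to the dask API.

```python
import os

def parse_env_value(value):
    """Convert a string to an int, float or boolean (in that order) if possible."""
    bools = {"true": True, "false": False}
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    return bools.get(value.lower(), value)

# Local re-implementation of the DASK_* loading loop from the patch, so the
# casting behaviour can be exercised without importing distributed.
config = {}
os.environ["DASK_ALLOWED_FAILURES"] = "20"
for name, value in os.environ.items():
    if name.startswith("DASK_"):
        config[name[5:].lower().replace("_", "-")] = parse_env_value(value)

assert config["allowed-failures"] == 20        # int, so comparisons like `>` work
assert parse_env_value("1.5") == 1.5           # float
assert parse_env_value("True") is True         # boolean
assert parse_env_value("info") == "info"       # anything else stays a string
```

Before the patch, `config["allowed-failures"]` would have stayed the string `"20"`, reproducing the `'>' not supported between instances of 'int' and 'str'` error quoted in the issue.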
gh_patches_debug_41420
|
rasdani/github-patches
|
git_diff
|
cornellius-gp__gpytorch-602
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ARD for RBFKernelGrad
Hi,
Is it possible to use ARD when training a GP model with derivative data (RBFKernelGrad)? Would it be possible to add support for that?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/kernels/rbf_kernel_grad.py`
Content:
```
1 #!/usr/bin/env python3
2 from .rbf_kernel import RBFKernel
3 import torch
4 from ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor
5
6
7 class RBFKernelGrad(RBFKernel):
8 r"""
9 Computes a covariance matrix of the RBF kernel that models the covariance
10 between the values and partial derivatives for inputs :math:`\mathbf{x_1}`
11 and :math:`\mathbf{x_2}`.
12
13 See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.
14
15 .. note::
16
17 This kernel does not have an `outputscale` parameter. To add a scaling parameter,
18 decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
19
20 Args:
21 :attr:`batch_shape` (torch.Size, optional):
22 Set this if you want a separate lengthscale for each
23 batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([1])`.
24 :attr:`active_dims` (tuple of ints, optional):
25 Set this if you want to compute the covariance of only a few input dimensions. The ints
26 corresponds to the indices of the dimensions. Default: `None`.
27 :attr:`lengthscale_prior` (Prior, optional):
28 Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
29 :attr:`param_transform` (function, optional):
30 Set this if you want to use something other than softplus to ensure positiveness of parameters.
31 :attr:`inv_param_transform` (function, optional):
32 Set this to allow setting parameters directly in transformed space and sampling from priors.
33 Automatically inferred for common transformations such as torch.exp or torch.nn.functional.softplus.
34 :attr:`eps` (float):
35 The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.
36
37 Attributes:
38 :attr:`lengthscale` (Tensor):
39 The lengthscale parameter. Size/shape of parameter depends on the
40 :attr:`ard_num_dims` and :attr:`batch_shape` arguments.
41
42 Example:
43 >>> x = torch.randn(10, 5)
44 >>> # Non-batch: Simple option
45 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
46 >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)
47 >>>
48 >>> batch_x = torch.randn(2, 10, 5)
49 >>> # Batch: Simple option
50 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
51 >>> # Batch: different lengthscale for each batch
52 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))
53 >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60)
54 """
55 def forward(self, x1, x2, diag=False, **params):
56 b = 1
57 if len(x1.size()) == 2:
58 n1, d = x1.size()
59 n2, d = x2.size()
60 else:
61 b, n1, d = x1.size()
62 _, n2, _ = x2.size()
63
64 K = torch.zeros(b, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype) # batch x n1(d+1) x n2(d+1)
65 ell = self.lengthscale.squeeze(-1)
66
67 if not diag:
68 # Scale the inputs by the lengthscale (for stability)
69 x1_ = x1 / ell
70 x2_ = x2 / ell
71
72 # Form all possible rank-1 products for the gradient and Hessian blocks
73 outer = x1_.view([b, n1, 1, d]) - x2_.view([b, 1, n2, d])
74 outer = torch.transpose(outer, -1, -2).contiguous()
75
76 # 1) Kernel block
77 diff = self._covar_dist(x1_, x2_, square_dist=True, **params)
78 K_11 = diff.div_(-2).exp_()
79 K[..., :n1, :n2] = K_11
80
81 # 2) First gradient block
82 outer1 = outer.view([b, n1, n2 * d]) / ell
83 K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])
84
85 # 3) Second gradient block
86 outer2 = outer.transpose(-1, -3).contiguous().view([b, n2, n1 * d])
87 outer2 = outer2.transpose(-1, -2) / ell
88 K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])
89
90 # 4) Hessian block
91 outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])
92 kp = KroneckerProductLazyTensor(
93 torch.eye(d, d, device=x1.device, dtype=x1.dtype),
94 torch.ones(n1, n2, device=x1.device, dtype=x1.dtype)
95 )
96 chain_rule = kp.evaluate() / ell.pow(2) - outer3
97 K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])
98
99 # Symmetrize for stability
100 if n1 == n2 and torch.eq(x1, x2).all():
101 K = 0.5 * (K.transpose(-1, -2) + K)
102
103 # Apply a perfect shuffle permutation to match the MutiTask ordering
104 pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().contiguous().view((n1 * (d + 1)))
105 pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
106 K = K[..., pi1, :][..., :, pi2]
107
108 return K
109
110 else: # TODO: This will change when ARD is supported
111 if not (n1 == n2 and torch.eq(x1, x2).all()):
112 raise RuntimeError("diag=True only works when x1 == x2")
113
114 kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
115 grad_diag = torch.ones(1, n2 * d, device=x1.device, dtype=x1.dtype) / (ell.pow(2))
116 k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
117 pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
118 return k_diag[..., pi]
119
120 def size(self, x1, x2):
121 """
122 Given `x_1` with `n_1` data points and `x_2` with `n_2` data points, both in
123 `d` dimensions, RBFKernelGrad returns an `n_1(d+1) x n_2(d+1)` kernel matrix.
124 """
125 non_batch_size = ((x1.size(-1) + 1) * x1.size(-2), (x2.size(-1) + 1) * x2.size(-2))
126 if x1.ndimension() == 3:
127 return torch.Size((x1.size(0),) + non_batch_size)
128 else:
129 return torch.Size(non_batch_size)
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gpytorch/kernels/rbf_kernel_grad.py b/gpytorch/kernels/rbf_kernel_grad.py
--- a/gpytorch/kernels/rbf_kernel_grad.py
+++ b/gpytorch/kernels/rbf_kernel_grad.py
@@ -62,15 +62,15 @@
_, n2, _ = x2.size()
K = torch.zeros(b, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype) # batch x n1(d+1) x n2(d+1)
- ell = self.lengthscale.squeeze(-1)
if not diag:
# Scale the inputs by the lengthscale (for stability)
- x1_ = x1 / ell
- x2_ = x2 / ell
+ x1_ = x1.div(self.lengthscale)
+ x2_ = x2.div(self.lengthscale)
# Form all possible rank-1 products for the gradient and Hessian blocks
outer = x1_.view([b, n1, 1, d]) - x2_.view([b, 1, n2, d])
+ outer = outer / self.lengthscale
outer = torch.transpose(outer, -1, -2).contiguous()
# 1) Kernel block
@@ -79,21 +79,21 @@
K[..., :n1, :n2] = K_11
# 2) First gradient block
- outer1 = outer.view([b, n1, n2 * d]) / ell
+ outer1 = outer.view([b, n1, n2 * d])
K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])
# 3) Second gradient block
outer2 = outer.transpose(-1, -3).contiguous().view([b, n2, n1 * d])
- outer2 = outer2.transpose(-1, -2) / ell
+ outer2 = outer2.transpose(-1, -2)
K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])
# 4) Hessian block
outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])
kp = KroneckerProductLazyTensor(
- torch.eye(d, d, device=x1.device, dtype=x1.dtype),
- torch.ones(n1, n2, device=x1.device, dtype=x1.dtype)
+ torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat([b, 1, 1]) / self.lengthscale.pow_(2),
+ torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat([b, 1, 1])
)
- chain_rule = kp.evaluate() / ell.pow(2) - outer3
+ chain_rule = kp.evaluate() - outer3
K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])
# Symmetrize for stability
@@ -107,12 +107,13 @@
return K
- else: # TODO: This will change when ARD is supported
+ else:
if not (n1 == n2 and torch.eq(x1, x2).all()):
raise RuntimeError("diag=True only works when x1 == x2")
kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
- grad_diag = torch.ones(1, n2 * d, device=x1.device, dtype=x1.dtype) / (ell.pow(2))
+ grad_diag = torch.ones(b, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)
+ grad_diag = grad_diag.transpose(-1, -2).contiguous().view([b, n2 * d])
k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
return k_diag[..., pi]
|
{"golden_diff": "diff --git a/gpytorch/kernels/rbf_kernel_grad.py b/gpytorch/kernels/rbf_kernel_grad.py\n--- a/gpytorch/kernels/rbf_kernel_grad.py\n+++ b/gpytorch/kernels/rbf_kernel_grad.py\n@@ -62,15 +62,15 @@\n _, n2, _ = x2.size()\n \n K = torch.zeros(b, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype) # batch x n1(d+1) x n2(d+1)\n- ell = self.lengthscale.squeeze(-1)\n \n if not diag:\n # Scale the inputs by the lengthscale (for stability)\n- x1_ = x1 / ell\n- x2_ = x2 / ell\n+ x1_ = x1.div(self.lengthscale)\n+ x2_ = x2.div(self.lengthscale)\n \n # Form all possible rank-1 products for the gradient and Hessian blocks\n outer = x1_.view([b, n1, 1, d]) - x2_.view([b, 1, n2, d])\n+ outer = outer / self.lengthscale\n outer = torch.transpose(outer, -1, -2).contiguous()\n \n # 1) Kernel block\n@@ -79,21 +79,21 @@\n K[..., :n1, :n2] = K_11\n \n # 2) First gradient block\n- outer1 = outer.view([b, n1, n2 * d]) / ell\n+ outer1 = outer.view([b, n1, n2 * d])\n K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])\n \n # 3) Second gradient block\n outer2 = outer.transpose(-1, -3).contiguous().view([b, n2, n1 * d])\n- outer2 = outer2.transpose(-1, -2) / ell\n+ outer2 = outer2.transpose(-1, -2)\n K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])\n \n # 4) Hessian block\n outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])\n kp = KroneckerProductLazyTensor(\n- torch.eye(d, d, device=x1.device, dtype=x1.dtype),\n- torch.ones(n1, n2, device=x1.device, dtype=x1.dtype)\n+ torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat([b, 1, 1]) / self.lengthscale.pow_(2),\n+ torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat([b, 1, 1])\n )\n- chain_rule = kp.evaluate() / ell.pow(2) - outer3\n+ chain_rule = kp.evaluate() - outer3\n K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])\n \n # Symmetrize for stability\n@@ -107,12 +107,13 @@\n \n return K\n \n- else: # TODO: This will change when ARD is supported\n+ else:\n if not (n1 == n2 and torch.eq(x1, x2).all()):\n raise RuntimeError(\"diag=True only works when x1 == x2\")\n \n kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)\n- grad_diag = torch.ones(1, n2 * d, device=x1.device, dtype=x1.dtype) / (ell.pow(2))\n+ grad_diag = torch.ones(b, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)\n+ grad_diag = grad_diag.transpose(-1, -2).contiguous().view([b, n2 * d])\n k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)\n pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))\n return k_diag[..., pi]\n", "issue": "ARD for RBFKernelGrad\nHi,\r\nIs it possible to use ARD when training a GP model with derivative data (RBFkernelgrad)? Would it be possible to add support for that?\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .rbf_kernel import RBFKernel\nimport torch\nfrom ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor\n\n\nclass RBFKernelGrad(RBFKernel):\n r\"\"\"\n Computes a covariance matrix of the RBF kernel that models the covariance\n between the values and partial derivatives for inputs :math:`\\mathbf{x_1}`\n and :math:`\\mathbf{x_2}`.\n\n See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. 
It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([1])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`param_transform` (function, optional):\n Set this if you want to use something other than softplus to ensure positiveness of parameters.\n :attr:`inv_param_transform` (function, optional):\n Set this to allow setting parameters directly in transformed space and sampling from priors.\n Automatically inferred for common transformations such as torch.exp or torch.nn.functional.softplus.\n :attr:`eps` (float):\n The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size/shape of parameter depends on the\n :attr:`ard_num_dims` and :attr:`batch_shape` arguments.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))\n >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60)\n \"\"\"\n def forward(self, x1, x2, diag=False, **params):\n b = 1\n if len(x1.size()) == 2:\n n1, d = x1.size()\n n2, d = x2.size()\n else:\n b, n1, d = x1.size()\n _, n2, _ = x2.size()\n\n K = torch.zeros(b, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype) # batch x n1(d+1) x n2(d+1)\n ell = self.lengthscale.squeeze(-1)\n\n if not diag:\n # Scale the inputs by the lengthscale (for stability)\n x1_ = x1 / ell\n x2_ = x2 / ell\n\n # Form all possible rank-1 products for the gradient and Hessian blocks\n outer = x1_.view([b, n1, 1, d]) - x2_.view([b, 1, n2, d])\n outer = torch.transpose(outer, -1, -2).contiguous()\n\n # 1) Kernel block\n diff = self._covar_dist(x1_, x2_, square_dist=True, **params)\n K_11 = diff.div_(-2).exp_()\n K[..., :n1, :n2] = K_11\n\n # 2) First gradient block\n outer1 = outer.view([b, n1, n2 * d]) / ell\n K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])\n\n # 3) Second gradient block\n outer2 = outer.transpose(-1, -3).contiguous().view([b, n2, n1 * d])\n outer2 = outer2.transpose(-1, -2) / ell\n K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])\n\n # 4) Hessian block\n outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])\n kp = KroneckerProductLazyTensor(\n torch.eye(d, d, device=x1.device, dtype=x1.dtype),\n torch.ones(n1, n2, device=x1.device, dtype=x1.dtype)\n )\n chain_rule = kp.evaluate() / ell.pow(2) - outer3\n K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])\n\n # Symmetrize for stability\n if n1 == n2 and torch.eq(x1, x2).all():\n K = 0.5 * (K.transpose(-1, -2) + K)\n\n # Apply a perfect shuffle permutation to match the MutiTask ordering\n pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().contiguous().view((n1 * (d + 1)))\n pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 
* (d + 1)))\n K = K[..., pi1, :][..., :, pi2]\n\n return K\n\n else: # TODO: This will change when ARD is supported\n if not (n1 == n2 and torch.eq(x1, x2).all()):\n raise RuntimeError(\"diag=True only works when x1 == x2\")\n\n kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)\n grad_diag = torch.ones(1, n2 * d, device=x1.device, dtype=x1.dtype) / (ell.pow(2))\n k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)\n pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))\n return k_diag[..., pi]\n\n def size(self, x1, x2):\n \"\"\"\n Given `x_1` with `n_1` data points and `x_2` with `n_2` data points, both in\n `d` dimensions, RBFKernelGrad returns an `n_1(d+1) x n_2(d+1)` kernel matrix.\n \"\"\"\n non_batch_size = ((x1.size(-1) + 1) * x1.size(-2), (x2.size(-1) + 1) * x2.size(-2))\n if x1.ndimension() == 3:\n return torch.Size((x1.size(0),) + non_batch_size)\n else:\n return torch.Size(non_batch_size)\n", "path": "gpytorch/kernels/rbf_kernel_grad.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nfrom .rbf_kernel import RBFKernel\nimport torch\nfrom ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor\n\n\nclass RBFKernelGrad(RBFKernel):\n r\"\"\"\n Computes a covariance matrix of the RBF kernel that models the covariance\n between the values and partial derivatives for inputs :math:`\\mathbf{x_1}`\n and :math:`\\mathbf{x_2}`.\n\n See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([1])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`param_transform` (function, optional):\n Set this if you want to use something other than softplus to ensure positiveness of parameters.\n :attr:`inv_param_transform` (function, optional):\n Set this to allow setting parameters directly in transformed space and sampling from priors.\n Automatically inferred for common transformations such as torch.exp or torch.nn.functional.softplus.\n :attr:`eps` (float):\n The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. 
Size/shape of parameter depends on the\n :attr:`ard_num_dims` and :attr:`batch_shape` arguments.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))\n >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60)\n \"\"\"\n def forward(self, x1, x2, diag=False, **params):\n b = 1\n if len(x1.size()) == 2:\n n1, d = x1.size()\n n2, d = x2.size()\n else:\n b, n1, d = x1.size()\n _, n2, _ = x2.size()\n\n K = torch.zeros(b, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype) # batch x n1(d+1) x n2(d+1)\n\n if not diag:\n # Scale the inputs by the lengthscale (for stability)\n x1_ = x1.div(self.lengthscale)\n x2_ = x2.div(self.lengthscale)\n\n # Form all possible rank-1 products for the gradient and Hessian blocks\n outer = x1_.view([b, n1, 1, d]) - x2_.view([b, 1, n2, d])\n outer = outer / self.lengthscale\n outer = torch.transpose(outer, -1, -2).contiguous()\n\n # 1) Kernel block\n diff = self._covar_dist(x1_, x2_, square_dist=True, **params)\n K_11 = diff.div_(-2).exp_()\n K[..., :n1, :n2] = K_11\n\n # 2) First gradient block\n outer1 = outer.view([b, n1, n2 * d])\n K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])\n\n # 3) Second gradient block\n outer2 = outer.transpose(-1, -3).contiguous().view([b, n2, n1 * d])\n outer2 = outer2.transpose(-1, -2)\n K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])\n\n # 4) Hessian block\n outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])\n kp = KroneckerProductLazyTensor(\n torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat([b, 1, 1]) / self.lengthscale.pow_(2),\n torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat([b, 1, 1])\n )\n chain_rule = kp.evaluate() - outer3\n K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])\n\n # Symmetrize for stability\n if n1 == n2 and torch.eq(x1, x2).all():\n K = 0.5 * (K.transpose(-1, -2) + K)\n\n # Apply a perfect shuffle permutation to match the MutiTask ordering\n pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().contiguous().view((n1 * (d + 1)))\n pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))\n K = K[..., pi1, :][..., :, pi2]\n\n return K\n\n else:\n if not (n1 == n2 and torch.eq(x1, x2).all()):\n raise RuntimeError(\"diag=True only works when x1 == x2\")\n\n kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)\n grad_diag = torch.ones(b, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)\n grad_diag = grad_diag.transpose(-1, -2).contiguous().view([b, n2 * d])\n k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)\n pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))\n return k_diag[..., pi]\n\n def size(self, x1, x2):\n \"\"\"\n Given `x_1` with `n_1` data points and `x_2` with `n_2` data points, both in\n `d` dimensions, RBFKernelGrad returns an `n_1(d+1) x n_2(d+1)` kernel matrix.\n \"\"\"\n non_batch_size = ((x1.size(-1) + 1) * x1.size(-2), (x2.size(-1) + 1) * x2.size(-2))\n if x1.ndimension() == 3:\n return torch.Size((x1.size(0),) + 
non_batch_size)\n else:\n return torch.Size(non_batch_size)\n", "path": "gpytorch/kernels/rbf_kernel_grad.py"}]}
| 2,343 | 969 |
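Illustrative aside (not part of the dataset row above): a plain-PyTorch sketch of the broadcasting issue the patch addresses. With ARD the lengthscale has one entry per input dimension, so the per-dimension division has to happen while the feature axis is still trailing; once the gradient blocks are flattened to `n2 * d`, a `(1, 1, d)` lengthscale no longer lines up. Shapes and variable names below are assumptions made for the sketch, not GPyTorch API.

```python
import torch

b, n1, n2, d = 1, 4, 3, 5
x1 = torch.randn(b, n1, d)
x2 = torch.randn(b, n2, d)
lengthscale = torch.rand(1, 1, d) + 0.5   # ARD: one lengthscale per input dimension

# Pairwise differences with the feature axis still trailing: (b, n1, n2, d).
outer = x1.view(b, n1, 1, d) - x2.view(b, 1, n2, d)

# Dividing here broadcasts per dimension -- this is where the patch applies
# the lengthscale, before any transpose/reshape of the gradient blocks.
outer_scaled = outer / lengthscale        # (b, n1, n2, d)

# The pre-patch code divided after flattening to (b, n1, n2 * d), where a
# (1, 1, d) ARD lengthscale no longer matches the trailing axis.
flattened = outer.transpose(-1, -2).contiguous().view(b, n1, n2 * d)
try:
    flattened / lengthscale
except RuntimeError as err:
    print("broadcast fails after flattening:", err)
```

With a single isotropic lengthscale of shape `(1, 1, 1)`, the squeezed `(1, 1)` tensor divides everything uniformly, which is why the original code only broke once more than one lengthscale was used.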
gh_patches_debug_7272
|
rasdani/github-patches
|
git_diff
|
awslabs__gluonts-2141
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: PandasDataset() takes no arguments for PandasDataset.from_long_dataframe
Following the steps described in https://ts.gluon.ai/stable/tutorials/data_manipulation/pandasdataframes.html#Use-case-3---Loading-data-with-missing-values, I could not get it to work with my own DF nor with the example given:
```
ds = PandasDataset.from_long_dataframe(df, target="target", item_id="item_id")
Traceback (most recent call last):
File "C:\\AppData\Local\Temp/ipykernel_33648/388385624.py", line 1, in <module>
ds = PandasDataset.from_long_dataframe(dfx, target="target", item_id="item_id")
File "C:\\Anaconda3\lib\site-packages\gluonts\dataset\pandas.py", line 182, in from_long_dataframe
return cls(dataframes=dict(list(dataframe.groupby(item_id))), **kwargs)
TypeError: PandasDataset() takes no arguments
```
I also wanted to know whether there are any plans to add a way to achieve the same result starting from a wide DF, but (unlike the wide-DF example) incorporating a way to pass lists of feat_dynamic_cat, feat_static_cat, and so on (in the same shape as the width of the wide DF).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/dataset/pandas.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 from copy import deepcopy
15 from dataclasses import dataclass, field
16 from typing import Any, cast, Dict, Iterator, List, Optional, Union
17
18 import pandas as pd
19 from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
20 from toolz import valmap
21
22 from gluonts.dataset.common import Dataset, DataEntry, ProcessDataEntry
23 from gluonts.dataset.field_names import FieldName
24
25
26 @dataclass
27 class PandasDataset(Dataset):
28 """
29 A pandas.DataFrame-based dataset type.
30
31 This class is constructed with a collection of pandas.DataFrame-objects
32 where each ``DataFrame`` is representing one time series.
33 A ``target`` and a ``timestamp`` columns are essential. Furthermore,
34 static/dynamic real/categorical features can be specified.
35
36 Parameters
37 ----------
38 dataframes
39 Single ``pd.DataFrame``/``pd.Series`` or a collection as list or dict
40 containing at least ``timestamp`` and ``target`` values.
41 If a Dict is provided, the key will be the associated ``item_id``.
42 target
43 Name of the column that contains the ``target`` time series.
44 For multivariate targets, a list of column names should be provided.
45 timestamp
46 Name of the column that contains the timestamp information.
47 freq
48 Frequency of observations in the time series. Must be a valid pandas
49 frequency.
50 feat_dynamic_real
51 List of column names that contain dynamic real features.
52 feat_dynamic_cat
53 List of column names that contain dynamic categorical features.
54 feat_static_real
55 List of column names that contain static real features.
56 feat_static_cat
57 List of column names that contain static categorical features.
58 past_feat_dynamic_real
59 List of column names that contain dynamic real features only for the
60 history.
61 ignore_last_n_targets
62 For target and past dynamic features last ``ignore_last_n_targets``
63 elements are removed when iterating over the data set. This becomes
64 important when the predictor is called.
65 """
66
67 dataframes: Union[
68 pd.DataFrame,
69 pd.Series,
70 List[pd.DataFrame],
71 List[pd.Series],
72 Dict[str, pd.DataFrame],
73 Dict[str, pd.Series],
74 ]
75 target: Union[str, List[str]] = "target"
76 timestamp: Optional[str] = None
77 freq: Optional[str] = None
78 feat_dynamic_real: List[str] = field(default_factory=list)
79 feat_dynamic_cat: List[str] = field(default_factory=list)
80 feat_static_real: List[str] = field(default_factory=list)
81 feat_static_cat: List[str] = field(default_factory=list)
82 past_feat_dynamic_real: List[str] = field(default_factory=list)
83 ignore_last_n_targets: int = 0
84
85 def __post_init__(self) -> None:
86 if isinstance(self.target, list) and len(self.target) == 1:
87 self.target = self.target[0]
88 self.one_dim_target = not isinstance(self.target, list)
89
90 if is_series(self.dataframes):
91 self.dataframes = series_to_dataframe(self.dataframes)
92 # store data internally as List[Tuple[str, pandas.DataFrame]]
93 # if str is not empty it will be set in ``DataEntry`` as ``item_id``.
94 if isinstance(self.dataframes, dict):
95 self._dataframes = list(self.dataframes.items())
96 elif isinstance(self.dataframes, list):
97 self._dataframes = [(None, df) for df in self.dataframes]
98 else: # case single dataframe
99 self._dataframes = [(None, self.dataframes)]
100
101 for i, (item_id, df) in enumerate(self._dataframes):
102 if self.timestamp:
103 df = df.set_index(keys=self.timestamp)
104
105 if not isinstance(df.index, pd.PeriodIndex):
106 df.index = pd.to_datetime(df.index)
107 df = df.to_period(freq=self.freq)
108
109 df.sort_index(inplace=True)
110
111 assert is_uniform(df.index), (
112 "Dataframe index is not uniformly spaced. "
113 "If your dataframe contains data from multiple series in the "
114 'same column ("long" format), consider constructing the '
115 "dataset with `PandasDataset.from_long_dataframe` instead."
116 )
117
118 self._dataframes[i] = (item_id, df)
119
120 if not self.freq: # infer frequency from index
121 self.freq = self._dataframes[0][1].index.freqstr
122
123 self.process = ProcessDataEntry(
124 cast(str, self.freq), one_dim_target=self.one_dim_target
125 )
126
127 def _dataentry(
128 self, item_id: Optional[str], df: pd.DataFrame
129 ) -> DataEntry:
130 dataentry = as_dataentry(
131 data=df,
132 target=self.target,
133 feat_dynamic_real=self.feat_dynamic_real,
134 feat_dynamic_cat=self.feat_dynamic_cat,
135 feat_static_real=self.feat_static_real,
136 feat_static_cat=self.feat_static_cat,
137 past_feat_dynamic_real=self.past_feat_dynamic_real,
138 )
139 if item_id is not None:
140 dataentry["item_id"] = item_id
141 return dataentry
142
143 def __iter__(self) -> Iterator[DataEntry]:
144 for item_id, df in self._dataframes:
145 dataentry = self.process(self._dataentry(item_id, df))
146 if self.ignore_last_n_targets:
147 dataentry = prepare_prediction_data(
148 dataentry, self.ignore_last_n_targets
149 )
150 yield dataentry
151
152 def __len__(self) -> int:
153 return len(self._dataframes)
154
155 @classmethod
156 def from_long_dataframe(
157 cls, dataframe: pd.DataFrame, item_id: str, **kwargs
158 ) -> "PandasDataset":
159 """
160 Construct ``PandasDataset`` out of a long dataframe.
161 A long dataframe uses the long format for each variable. Target time
162 series values, for example, are stacked on top of each other rather
163 than side-by-side. The same is true for other dynamic or categorical
164 features.
165
166 Parameters
167 ----------
168 dataframe
169 pandas.DataFrame containing at least ``timestamp``, ``target`` and
170 ``item_id`` columns.
171 item_id
172 Name of the column that, when grouped by, gives the different time
173 series.
174 **kwargs
175 Additional arguments. Same as of PandasDataset class.
176
177 Returns
178 -------
179 PandasDataset
180 Gluonts dataset based on ``pandas.DataFrame``s.
181 """
182 return cls(dataframes=dict(list(dataframe.groupby(item_id))), **kwargs)
183
184
185 def series_to_dataframe(
186 series: Union[pd.Series, List[pd.Series], Dict[str, pd.Series]]
187 ) -> Union[pd.DataFrame, List[pd.DataFrame], Dict[str, pd.DataFrame]]:
188 def to_df(series):
189 assert isinstance(
190 series.index, DatetimeIndexOpsMixin
191 ), "series index has to be a DatetimeIndex."
192 return series.to_frame(name="target")
193
194 if isinstance(series, list):
195 return list(map(to_df, series))
196 elif isinstance(series, dict):
197 return valmap(to_df, series)
198 return to_df(series)
199
200
201 def is_series(series: Any) -> bool:
202 """
203 return True if ``series`` is ``pd.Series`` or a collection of
204 ``pd.Series``.
205 """
206 if isinstance(series, list):
207 return is_series(series[0])
208 elif isinstance(series, dict):
209 return is_series(list(series.values()))
210 return isinstance(series, pd.Series)
211
212
213 def as_dataentry(
214 data: pd.DataFrame,
215 target: Union[str, List[str]],
216 timestamp: Optional[str] = None,
217 feat_dynamic_real: List[str] = [],
218 feat_dynamic_cat: List[str] = [],
219 feat_static_real: List[str] = [],
220 feat_static_cat: List[str] = [],
221 past_feat_dynamic_real: List[str] = [],
222 ) -> DataEntry:
223 """
224 Convert a single time series (uni- or multi-variate) that is given in
225 a pandas.DataFrame format to a DataEntry.
226
227 Parameters
228 ----------
229 data
230 pandas.DataFrame containing at least ``timestamp``, ``target`` and
231 ``item_id`` columns.
232 target
233 Name of the column that contains the ``target`` time series.
234 For multivariate targets ``target`` is expecting a list of column
235 names.
236 timestamp
237 Name of the column that contains the timestamp information.
238 If ``None`` the index of ``data`` is assumed to be the time.
239 feat_dynamic_real
240 List of column names that contain dynamic real features.
241 feat_dynamic_cat
242 List of column names that contain dynamic categorical features.
243 feat_static_real
244 List of column names that contain static real features.
245 feat_static_cat
246 List of column names that contain static categorical features.
247 past_feat_dynamic_real
248 List of column names that contain dynamic real features only for
249 the history.
250
251 Returns
252 -------
253 DataEntry
254 A dictionary with at least ``target`` and ``start`` field.
255 """
256 start = data.loc[:, timestamp].iloc[0] if timestamp else data.index[0]
257 dataentry = {FieldName.START: start}
258
259 def set_field(fieldname, col_names, f=lambda x: x):
260 if col_names:
261 dataentry[fieldname] = [
262 f(data.loc[:, n].to_list()) for n in col_names
263 ]
264
265 if isinstance(target, str):
266 dataentry[FieldName.TARGET] = data.loc[:, target].to_list()
267 else:
268 set_field(FieldName.TARGET, target)
269 set_field(FieldName.FEAT_DYNAMIC_REAL, feat_dynamic_real)
270 set_field(FieldName.FEAT_DYNAMIC_CAT, feat_dynamic_cat)
271 set_field(FieldName.FEAT_STATIC_REAL, feat_static_real, lambda x: x[0])
272 set_field(FieldName.FEAT_STATIC_CAT, feat_static_cat, lambda x: x[0])
273 set_field(FieldName.PAST_FEAT_DYNAMIC_REAL, past_feat_dynamic_real)
274 return dataentry
275
276
277 def prepare_prediction_data(
278 dataentry: DataEntry, ignore_last_n_targets: int
279 ) -> DataEntry:
280 """
281 Remove ``ignore_last_n_targets`` values from ``target`` and
282 ``past_feat_dynamic_real``. Works in univariate and multivariate case.
283
284 >>> prepare_prediction_data(
285 >>> {"target": np.array([1., 2., 3., 4.])}, ignore_last_n_targets=2
286 >>> )
287 {'target': array([1., 2.])}
288 """
289 entry = deepcopy(dataentry)
290 for fname in [FieldName.TARGET, FieldName.PAST_FEAT_DYNAMIC_REAL]:
291 if fname in entry:
292 entry[fname] = entry[fname][..., :-ignore_last_n_targets]
293 return entry
294
295
296 def is_uniform(index: pd.PeriodIndex) -> bool:
297 """
298 Check if ``index`` contains monotonically increasing periods, evenly spaced
299 with frequency ``index.freq``.
300
301 >>> ts = ["2021-01-01 00:00", "2021-01-01 02:00", "2021-01-01 04:00"]
302 >>> is_uniform(pd.DatetimeIndex(ts).to_period("2H"))
303 True
304 >>> ts = ["2021-01-01 00:00", "2021-01-01 04:00"]
305 >>> is_uniform(pd.DatetimeIndex(ts).to_period("2H"))
306 False
307 """
308 return (index[1:] - index[:-1] == index.freq).all()
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/gluonts/dataset/pandas.py b/src/gluonts/dataset/pandas.py
--- a/src/gluonts/dataset/pandas.py
+++ b/src/gluonts/dataset/pandas.py
@@ -19,12 +19,12 @@
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from toolz import valmap
-from gluonts.dataset.common import Dataset, DataEntry, ProcessDataEntry
+from gluonts.dataset.common import DataEntry, ProcessDataEntry
from gluonts.dataset.field_names import FieldName
@dataclass
-class PandasDataset(Dataset):
+class PandasDataset:
"""
A pandas.DataFrame-based dataset type.
|
{"golden_diff": "diff --git a/src/gluonts/dataset/pandas.py b/src/gluonts/dataset/pandas.py\n--- a/src/gluonts/dataset/pandas.py\n+++ b/src/gluonts/dataset/pandas.py\n@@ -19,12 +19,12 @@\n from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\n from toolz import valmap\n \n-from gluonts.dataset.common import Dataset, DataEntry, ProcessDataEntry\n+from gluonts.dataset.common import DataEntry, ProcessDataEntry\n from gluonts.dataset.field_names import FieldName\n \n \n @dataclass\n-class PandasDataset(Dataset):\n+class PandasDataset:\n \"\"\"\n A pandas.DataFrame-based dataset type.\n", "issue": "TypeError: PandasDataset() takes no arguments for PandasDataset.from_long_dataframe\nFollowing as described in https://ts.gluon.ai/stable/tutorials/data_manipulation/pandasdataframes.html#Use-case-3---Loading-data-with-missing-values I could not get it to work with my own DF nor the example given:\r\n\r\n```\r\nds = PandasDataset.from_long_dataframe(df, target=\"target\", item_id=\"item_id\")\r\nTraceback (most recent call last):\r\n\r\n File \"C:\\\\AppData\\Local\\Temp/ipykernel_33648/388385624.py\", line 1, in <module>\r\n ds = PandasDataset.from_long_dataframe(dfx, target=\"target\", item_id=\"item_id\")\r\n\r\n File \"C:\\\\Anaconda3\\lib\\site-packages\\gluonts\\dataset\\pandas.py\", line 182, in from_long_dataframe\r\n return cls(dataframes=dict(list(dataframe.groupby(item_id))), **kwargs)\r\n\r\nTypeError: PandasDataset() takes no arguments\r\n```\r\n\r\nAlso wanted to know if there are any plans of adding a methodology to achieve the same result starting from a wide DF, but (unlike the wide df example) incorporating a way to pass a list of feat_dynamic_cat, feat_static_cat, and so on (in same shape as the width of the wide df).\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom copy import deepcopy\nfrom dataclasses import dataclass, field\nfrom typing import Any, cast, Dict, Iterator, List, Optional, Union\n\nimport pandas as pd\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom toolz import valmap\n\nfrom gluonts.dataset.common import Dataset, DataEntry, ProcessDataEntry\nfrom gluonts.dataset.field_names import FieldName\n\n\n@dataclass\nclass PandasDataset(Dataset):\n \"\"\"\n A pandas.DataFrame-based dataset type.\n\n This class is constructed with a collection of pandas.DataFrame-objects\n where each ``DataFrame`` is representing one time series.\n A ``target`` and a ``timestamp`` columns are essential. 
Furthermore,\n static/dynamic real/categorical features can be specified.\n\n Parameters\n ----------\n dataframes\n Single ``pd.DataFrame``/``pd.Series`` or a collection as list or dict\n containing at least ``timestamp`` and ``target`` values.\n If a Dict is provided, the key will be the associated ``item_id``.\n target\n Name of the column that contains the ``target`` time series.\n For multivariate targets, a list of column names should be provided.\n timestamp\n Name of the column that contains the timestamp information.\n freq\n Frequency of observations in the time series. Must be a valid pandas\n frequency.\n feat_dynamic_real\n List of column names that contain dynamic real features.\n feat_dynamic_cat\n List of column names that contain dynamic categorical features.\n feat_static_real\n List of column names that contain static real features.\n feat_static_cat\n List of column names that contain static categorical features.\n past_feat_dynamic_real\n List of column names that contain dynamic real features only for the\n history.\n ignore_last_n_targets\n For target and past dynamic features last ``ignore_last_n_targets``\n elements are removed when iterating over the data set. This becomes\n important when the predictor is called.\n \"\"\"\n\n dataframes: Union[\n pd.DataFrame,\n pd.Series,\n List[pd.DataFrame],\n List[pd.Series],\n Dict[str, pd.DataFrame],\n Dict[str, pd.Series],\n ]\n target: Union[str, List[str]] = \"target\"\n timestamp: Optional[str] = None\n freq: Optional[str] = None\n feat_dynamic_real: List[str] = field(default_factory=list)\n feat_dynamic_cat: List[str] = field(default_factory=list)\n feat_static_real: List[str] = field(default_factory=list)\n feat_static_cat: List[str] = field(default_factory=list)\n past_feat_dynamic_real: List[str] = field(default_factory=list)\n ignore_last_n_targets: int = 0\n\n def __post_init__(self) -> None:\n if isinstance(self.target, list) and len(self.target) == 1:\n self.target = self.target[0]\n self.one_dim_target = not isinstance(self.target, list)\n\n if is_series(self.dataframes):\n self.dataframes = series_to_dataframe(self.dataframes)\n # store data internally as List[Tuple[str, pandas.DataFrame]]\n # if str is not empty it will be set in ``DataEntry`` as ``item_id``.\n if isinstance(self.dataframes, dict):\n self._dataframes = list(self.dataframes.items())\n elif isinstance(self.dataframes, list):\n self._dataframes = [(None, df) for df in self.dataframes]\n else: # case single dataframe\n self._dataframes = [(None, self.dataframes)]\n\n for i, (item_id, df) in enumerate(self._dataframes):\n if self.timestamp:\n df = df.set_index(keys=self.timestamp)\n\n if not isinstance(df.index, pd.PeriodIndex):\n df.index = pd.to_datetime(df.index)\n df = df.to_period(freq=self.freq)\n\n df.sort_index(inplace=True)\n\n assert is_uniform(df.index), (\n \"Dataframe index is not uniformly spaced. 
\"\n \"If your dataframe contains data from multiple series in the \"\n 'same column (\"long\" format), consider constructing the '\n \"dataset with `PandasDataset.from_long_dataframe` instead.\"\n )\n\n self._dataframes[i] = (item_id, df)\n\n if not self.freq: # infer frequency from index\n self.freq = self._dataframes[0][1].index.freqstr\n\n self.process = ProcessDataEntry(\n cast(str, self.freq), one_dim_target=self.one_dim_target\n )\n\n def _dataentry(\n self, item_id: Optional[str], df: pd.DataFrame\n ) -> DataEntry:\n dataentry = as_dataentry(\n data=df,\n target=self.target,\n feat_dynamic_real=self.feat_dynamic_real,\n feat_dynamic_cat=self.feat_dynamic_cat,\n feat_static_real=self.feat_static_real,\n feat_static_cat=self.feat_static_cat,\n past_feat_dynamic_real=self.past_feat_dynamic_real,\n )\n if item_id is not None:\n dataentry[\"item_id\"] = item_id\n return dataentry\n\n def __iter__(self) -> Iterator[DataEntry]:\n for item_id, df in self._dataframes:\n dataentry = self.process(self._dataentry(item_id, df))\n if self.ignore_last_n_targets:\n dataentry = prepare_prediction_data(\n dataentry, self.ignore_last_n_targets\n )\n yield dataentry\n\n def __len__(self) -> int:\n return len(self._dataframes)\n\n @classmethod\n def from_long_dataframe(\n cls, dataframe: pd.DataFrame, item_id: str, **kwargs\n ) -> \"PandasDataset\":\n \"\"\"\n Construct ``PandasDataset`` out of a long dataframe.\n A long dataframe uses the long format for each variable. Target time\n series values, for example, are stacked on top of each other rather\n than side-by-side. The same is true for other dynamic or categorical\n features.\n\n Parameters\n ----------\n dataframe\n pandas.DataFrame containing at least ``timestamp``, ``target`` and\n ``item_id`` columns.\n item_id\n Name of the column that, when grouped by, gives the different time\n series.\n **kwargs\n Additional arguments. 
Same as of PandasDataset class.\n\n Returns\n -------\n PandasDataset\n Gluonts dataset based on ``pandas.DataFrame``s.\n \"\"\"\n return cls(dataframes=dict(list(dataframe.groupby(item_id))), **kwargs)\n\n\ndef series_to_dataframe(\n series: Union[pd.Series, List[pd.Series], Dict[str, pd.Series]]\n) -> Union[pd.DataFrame, List[pd.DataFrame], Dict[str, pd.DataFrame]]:\n def to_df(series):\n assert isinstance(\n series.index, DatetimeIndexOpsMixin\n ), \"series index has to be a DatetimeIndex.\"\n return series.to_frame(name=\"target\")\n\n if isinstance(series, list):\n return list(map(to_df, series))\n elif isinstance(series, dict):\n return valmap(to_df, series)\n return to_df(series)\n\n\ndef is_series(series: Any) -> bool:\n \"\"\"\n return True if ``series`` is ``pd.Series`` or a collection of\n ``pd.Series``.\n \"\"\"\n if isinstance(series, list):\n return is_series(series[0])\n elif isinstance(series, dict):\n return is_series(list(series.values()))\n return isinstance(series, pd.Series)\n\n\ndef as_dataentry(\n data: pd.DataFrame,\n target: Union[str, List[str]],\n timestamp: Optional[str] = None,\n feat_dynamic_real: List[str] = [],\n feat_dynamic_cat: List[str] = [],\n feat_static_real: List[str] = [],\n feat_static_cat: List[str] = [],\n past_feat_dynamic_real: List[str] = [],\n) -> DataEntry:\n \"\"\"\n Convert a single time series (uni- or multi-variate) that is given in\n a pandas.DataFrame format to a DataEntry.\n\n Parameters\n ----------\n data\n pandas.DataFrame containing at least ``timestamp``, ``target`` and\n ``item_id`` columns.\n target\n Name of the column that contains the ``target`` time series.\n For multivariate targets ``target`` is expecting a list of column\n names.\n timestamp\n Name of the column that contains the timestamp information.\n If ``None`` the index of ``data`` is assumed to be the time.\n feat_dynamic_real\n List of column names that contain dynamic real features.\n feat_dynamic_cat\n List of column names that contain dynamic categorical features.\n feat_static_real\n List of column names that contain static real features.\n feat_static_cat\n List of column names that contain static categorical features.\n past_feat_dynamic_real\n List of column names that contain dynamic real features only for\n the history.\n\n Returns\n -------\n DataEntry\n A dictionary with at least ``target`` and ``start`` field.\n \"\"\"\n start = data.loc[:, timestamp].iloc[0] if timestamp else data.index[0]\n dataentry = {FieldName.START: start}\n\n def set_field(fieldname, col_names, f=lambda x: x):\n if col_names:\n dataentry[fieldname] = [\n f(data.loc[:, n].to_list()) for n in col_names\n ]\n\n if isinstance(target, str):\n dataentry[FieldName.TARGET] = data.loc[:, target].to_list()\n else:\n set_field(FieldName.TARGET, target)\n set_field(FieldName.FEAT_DYNAMIC_REAL, feat_dynamic_real)\n set_field(FieldName.FEAT_DYNAMIC_CAT, feat_dynamic_cat)\n set_field(FieldName.FEAT_STATIC_REAL, feat_static_real, lambda x: x[0])\n set_field(FieldName.FEAT_STATIC_CAT, feat_static_cat, lambda x: x[0])\n set_field(FieldName.PAST_FEAT_DYNAMIC_REAL, past_feat_dynamic_real)\n return dataentry\n\n\ndef prepare_prediction_data(\n dataentry: DataEntry, ignore_last_n_targets: int\n) -> DataEntry:\n \"\"\"\n Remove ``ignore_last_n_targets`` values from ``target`` and\n ``past_feat_dynamic_real``. 
Works in univariate and multivariate case.\n\n >>> prepare_prediction_data(\n >>> {\"target\": np.array([1., 2., 3., 4.])}, ignore_last_n_targets=2\n >>> )\n {'target': array([1., 2.])}\n \"\"\"\n entry = deepcopy(dataentry)\n for fname in [FieldName.TARGET, FieldName.PAST_FEAT_DYNAMIC_REAL]:\n if fname in entry:\n entry[fname] = entry[fname][..., :-ignore_last_n_targets]\n return entry\n\n\ndef is_uniform(index: pd.PeriodIndex) -> bool:\n \"\"\"\n Check if ``index`` contains monotonically increasing periods, evenly spaced\n with frequency ``index.freq``.\n\n >>> ts = [\"2021-01-01 00:00\", \"2021-01-01 02:00\", \"2021-01-01 04:00\"]\n >>> is_uniform(pd.DatetimeIndex(ts).to_period(\"2H\"))\n True\n >>> ts = [\"2021-01-01 00:00\", \"2021-01-01 04:00\"]\n >>> is_uniform(pd.DatetimeIndex(ts).to_period(\"2H\"))\n False\n \"\"\"\n return (index[1:] - index[:-1] == index.freq).all()\n", "path": "src/gluonts/dataset/pandas.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom copy import deepcopy\nfrom dataclasses import dataclass, field\nfrom typing import Any, cast, Dict, Iterator, List, Optional, Union\n\nimport pandas as pd\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom toolz import valmap\n\nfrom gluonts.dataset.common import DataEntry, ProcessDataEntry\nfrom gluonts.dataset.field_names import FieldName\n\n\n@dataclass\nclass PandasDataset:\n \"\"\"\n A pandas.DataFrame-based dataset type.\n\n This class is constructed with a collection of pandas.DataFrame-objects\n where each ``DataFrame`` is representing one time series.\n A ``target`` and a ``timestamp`` columns are essential. Furthermore,\n static/dynamic real/categorical features can be specified.\n\n Parameters\n ----------\n dataframes\n Single ``pd.DataFrame``/``pd.Series`` or a collection as list or dict\n containing at least ``timestamp`` and ``target`` values.\n If a Dict is provided, the key will be the associated ``item_id``.\n target\n Name of the column that contains the ``target`` time series.\n For multivariate targets, a list of column names should be provided.\n timestamp\n Name of the column that contains the timestamp information.\n freq\n Frequency of observations in the time series. Must be a valid pandas\n frequency.\n feat_dynamic_real\n List of column names that contain dynamic real features.\n feat_dynamic_cat\n List of column names that contain dynamic categorical features.\n feat_static_real\n List of column names that contain static real features.\n feat_static_cat\n List of column names that contain static categorical features.\n past_feat_dynamic_real\n List of column names that contain dynamic real features only for the\n history.\n ignore_last_n_targets\n For target and past dynamic features last ``ignore_last_n_targets``\n elements are removed when iterating over the data set. 
This becomes\n important when the predictor is called.\n \"\"\"\n\n dataframes: Union[\n pd.DataFrame,\n pd.Series,\n List[pd.DataFrame],\n List[pd.Series],\n Dict[str, pd.DataFrame],\n Dict[str, pd.Series],\n ]\n target: Union[str, List[str]] = \"target\"\n timestamp: Optional[str] = None\n freq: Optional[str] = None\n feat_dynamic_real: List[str] = field(default_factory=list)\n feat_dynamic_cat: List[str] = field(default_factory=list)\n feat_static_real: List[str] = field(default_factory=list)\n feat_static_cat: List[str] = field(default_factory=list)\n past_feat_dynamic_real: List[str] = field(default_factory=list)\n ignore_last_n_targets: int = 0\n\n def __post_init__(self) -> None:\n if isinstance(self.target, list) and len(self.target) == 1:\n self.target = self.target[0]\n self.one_dim_target = not isinstance(self.target, list)\n\n if is_series(self.dataframes):\n self.dataframes = series_to_dataframe(self.dataframes)\n # store data internally as List[Tuple[str, pandas.DataFrame]]\n # if str is not empty it will be set in ``DataEntry`` as ``item_id``.\n if isinstance(self.dataframes, dict):\n self._dataframes = list(self.dataframes.items())\n elif isinstance(self.dataframes, list):\n self._dataframes = [(None, df) for df in self.dataframes]\n else: # case single dataframe\n self._dataframes = [(None, self.dataframes)]\n\n for i, (item_id, df) in enumerate(self._dataframes):\n if self.timestamp:\n df = df.set_index(keys=self.timestamp)\n\n if not isinstance(df.index, pd.PeriodIndex):\n df.index = pd.to_datetime(df.index)\n df = df.to_period(freq=self.freq)\n\n df.sort_index(inplace=True)\n\n assert is_uniform(df.index), (\n \"Dataframe index is not uniformly spaced. \"\n \"If your dataframe contains data from multiple series in the \"\n 'same column (\"long\" format), consider constructing the '\n \"dataset with `PandasDataset.from_long_dataframe` instead.\"\n )\n\n self._dataframes[i] = (item_id, df)\n\n if not self.freq: # infer frequency from index\n self.freq = self._dataframes[0][1].index.freqstr\n\n self.process = ProcessDataEntry(\n cast(str, self.freq), one_dim_target=self.one_dim_target\n )\n\n def _dataentry(\n self, item_id: Optional[str], df: pd.DataFrame\n ) -> DataEntry:\n dataentry = as_dataentry(\n data=df,\n target=self.target,\n feat_dynamic_real=self.feat_dynamic_real,\n feat_dynamic_cat=self.feat_dynamic_cat,\n feat_static_real=self.feat_static_real,\n feat_static_cat=self.feat_static_cat,\n past_feat_dynamic_real=self.past_feat_dynamic_real,\n )\n if item_id is not None:\n dataentry[\"item_id\"] = item_id\n return dataentry\n\n def __iter__(self) -> Iterator[DataEntry]:\n for item_id, df in self._dataframes:\n dataentry = self.process(self._dataentry(item_id, df))\n if self.ignore_last_n_targets:\n dataentry = prepare_prediction_data(\n dataentry, self.ignore_last_n_targets\n )\n yield dataentry\n\n def __len__(self) -> int:\n return len(self._dataframes)\n\n @classmethod\n def from_long_dataframe(\n cls, dataframe: pd.DataFrame, item_id: str, **kwargs\n ) -> \"PandasDataset\":\n \"\"\"\n Construct ``PandasDataset`` out of a long dataframe.\n A long dataframe uses the long format for each variable. Target time\n series values, for example, are stacked on top of each other rather\n than side-by-side. 
The same is true for other dynamic or categorical\n features.\n\n Parameters\n ----------\n dataframe\n pandas.DataFrame containing at least ``timestamp``, ``target`` and\n ``item_id`` columns.\n item_id\n Name of the column that, when grouped by, gives the different time\n series.\n **kwargs\n Additional arguments. Same as of PandasDataset class.\n\n Returns\n -------\n PandasDataset\n Gluonts dataset based on ``pandas.DataFrame``s.\n \"\"\"\n return cls(dataframes=dict(list(dataframe.groupby(item_id))), **kwargs)\n\n\ndef series_to_dataframe(\n series: Union[pd.Series, List[pd.Series], Dict[str, pd.Series]]\n) -> Union[pd.DataFrame, List[pd.DataFrame], Dict[str, pd.DataFrame]]:\n def to_df(series):\n assert isinstance(\n series.index, DatetimeIndexOpsMixin\n ), \"series index has to be a DatetimeIndex.\"\n return series.to_frame(name=\"target\")\n\n if isinstance(series, list):\n return list(map(to_df, series))\n elif isinstance(series, dict):\n return valmap(to_df, series)\n return to_df(series)\n\n\ndef is_series(series: Any) -> bool:\n \"\"\"\n return True if ``series`` is ``pd.Series`` or a collection of\n ``pd.Series``.\n \"\"\"\n if isinstance(series, list):\n return is_series(series[0])\n elif isinstance(series, dict):\n return is_series(list(series.values()))\n return isinstance(series, pd.Series)\n\n\ndef as_dataentry(\n data: pd.DataFrame,\n target: Union[str, List[str]],\n timestamp: Optional[str] = None,\n feat_dynamic_real: List[str] = [],\n feat_dynamic_cat: List[str] = [],\n feat_static_real: List[str] = [],\n feat_static_cat: List[str] = [],\n past_feat_dynamic_real: List[str] = [],\n) -> DataEntry:\n \"\"\"\n Convert a single time series (uni- or multi-variate) that is given in\n a pandas.DataFrame format to a DataEntry.\n\n Parameters\n ----------\n data\n pandas.DataFrame containing at least ``timestamp``, ``target`` and\n ``item_id`` columns.\n target\n Name of the column that contains the ``target`` time series.\n For multivariate targets ``target`` is expecting a list of column\n names.\n timestamp\n Name of the column that contains the timestamp information.\n If ``None`` the index of ``data`` is assumed to be the time.\n feat_dynamic_real\n List of column names that contain dynamic real features.\n feat_dynamic_cat\n List of column names that contain dynamic categorical features.\n feat_static_real\n List of column names that contain static real features.\n feat_static_cat\n List of column names that contain static categorical features.\n past_feat_dynamic_real\n List of column names that contain dynamic real features only for\n the history.\n\n Returns\n -------\n DataEntry\n A dictionary with at least ``target`` and ``start`` field.\n \"\"\"\n start = data.loc[:, timestamp].iloc[0] if timestamp else data.index[0]\n dataentry = {FieldName.START: start}\n\n def set_field(fieldname, col_names, f=lambda x: x):\n if col_names:\n dataentry[fieldname] = [\n f(data.loc[:, n].to_list()) for n in col_names\n ]\n\n if isinstance(target, str):\n dataentry[FieldName.TARGET] = data.loc[:, target].to_list()\n else:\n set_field(FieldName.TARGET, target)\n set_field(FieldName.FEAT_DYNAMIC_REAL, feat_dynamic_real)\n set_field(FieldName.FEAT_DYNAMIC_CAT, feat_dynamic_cat)\n set_field(FieldName.FEAT_STATIC_REAL, feat_static_real, lambda x: x[0])\n set_field(FieldName.FEAT_STATIC_CAT, feat_static_cat, lambda x: x[0])\n set_field(FieldName.PAST_FEAT_DYNAMIC_REAL, past_feat_dynamic_real)\n return dataentry\n\n\ndef prepare_prediction_data(\n dataentry: DataEntry, 
ignore_last_n_targets: int\n) -> DataEntry:\n \"\"\"\n Remove ``ignore_last_n_targets`` values from ``target`` and\n ``past_feat_dynamic_real``. Works in univariate and multivariate case.\n\n >>> prepare_prediction_data(\n >>> {\"target\": np.array([1., 2., 3., 4.])}, ignore_last_n_targets=2\n >>> )\n {'target': array([1., 2.])}\n \"\"\"\n entry = deepcopy(dataentry)\n for fname in [FieldName.TARGET, FieldName.PAST_FEAT_DYNAMIC_REAL]:\n if fname in entry:\n entry[fname] = entry[fname][..., :-ignore_last_n_targets]\n return entry\n\n\ndef is_uniform(index: pd.PeriodIndex) -> bool:\n \"\"\"\n Check if ``index`` contains monotonically increasing periods, evenly spaced\n with frequency ``index.freq``.\n\n >>> ts = [\"2021-01-01 00:00\", \"2021-01-01 02:00\", \"2021-01-01 04:00\"]\n >>> is_uniform(pd.DatetimeIndex(ts).to_period(\"2H\"))\n True\n >>> ts = [\"2021-01-01 00:00\", \"2021-01-01 04:00\"]\n >>> is_uniform(pd.DatetimeIndex(ts).to_period(\"2H\"))\n False\n \"\"\"\n return (index[1:] - index[:-1] == index.freq).all()\n", "path": "src/gluonts/dataset/pandas.py"}]}
| 4,014 | 160 |
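The `from_long_dataframe` helper shown in the record above splits a long-format frame into one frame per `item_id` via `dict(list(dataframe.groupby(item_id)))`. A tiny pandas-only sketch of that split, using hypothetical data (plain pandas, not the GluonTS API itself):

```
import pandas as pd

# Hypothetical long-format data: one row per (item_id, timestamp) pair.
long_df = pd.DataFrame(
    {
        "item_id": ["A", "A", "A", "B", "B", "B"],
        "target": [1.0, 2.0, 3.0, 10.0, 11.0, 12.0],
    },
    index=pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-03"] * 2),
)

# The same split performed inside from_long_dataframe:
# a dict mapping each item_id to its own per-series frame.
per_item = dict(list(long_df.groupby("item_id")))

print(per_item["A"]["target"].tolist())  # [1.0, 2.0, 3.0]
print(per_item["B"]["target"].tolist())  # [10.0, 11.0, 12.0]
```

Each value in `per_item` is then treated as one time series, which is why the wide/long distinction matters in the issue above.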
gh_patches_debug_5681
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-106
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
logging: __sentry__ isn't always callable
acdha@0af552b18d72594b183b18834ba7245a1ff5ade1 contains a trivial "fix" to avoid crashing when handling a value which has `__sentry__` set to `None` but I haven't had time to learn what `__sentry__` is supposed to do. `logging` is configured exactly as in the documentation, the code in question made a simple `logging.exception` call inside an `except` clause.
Here's a traceback - it's oddly formatted because the bug in question happened inside a django-tastypie resource handler:
```
Traceback (most recent call last):
File "/Users/cadams/.virtualenvs/MyProject/lib/python2.6/site-packages/tastypie/resources.py", line 178, in wrapper
return getattr(self,
view)(request,
*args,
**kwargs)
File "/Users/cadams/Projects/MyProject/MyProject/apps/api/resources.py", line 133, in dispatch_notes
return self.dispatch('notes',
request,
**kwargs)
File "/Users/cadams/.virtualenvs/MyProject/lib/python2.6/site-packages/tastypie/resources.py", line 350, in dispatch
response = method(request,
**kwargs)
File "/Users/cadams/Projects/MyProject/MyProject/apps/api/resources.py", line 167, in post_notes
bundle.data)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 1419, in exception
error(*((msg,
)+args),
**
{
'exc_info': 1
}
)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 1412, in error
root.error(*((msg,
)+args),
**kwargs)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 1047, in error
self._log(ERROR,
msg,
args,
**kwargs)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 1129, in _log
self.handle(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 1139, in handle
self.callHandlers(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 1176, in callHandlers
hdlr.handle(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py", line 662, in handle
self.emit(record)
File "/Users/cadams/Projects/django-sentry/sentry/client/handlers.py", line 14, in emit
get_client().create_from_record(record)
File "/Users/cadams/Projects/django-sentry/sentry/client/base.py", line 157, in create_from_record
return self.create_from_exception(record.exc_info,
**kwargs)
File "/Users/cadams/Projects/django-sentry/sentry/client/base.py", line 189, in create_from_exception
frames = varmap(shorten,
reporter.get_traceback_frames())
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 58, in varmap
return
[
varmap(func,
f) for f in var
]
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 56, in varmap
return dict((k,
varmap(func,
v)) for k,
v in var.iteritems())
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 56, in <genexpr>
return dict((k,
varmap(func,
v)) for k,
v in var.iteritems())
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 58, in varmap
return
[
varmap(func,
f) for f in var
]
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 58, in varmap
return
[
varmap(func,
f) for f in var
]
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 60, in varmap
return func(var)
File "/Users/cadams/Projects/django-sentry/sentry/client/base.py", line 183, in shorten
var = transform(var)
File "/Users/cadams/Projects/django-sentry/sentry/helpers.py", line 80, in transform
return value.__sentry__()
TypeError: 'NoneType' object is not callable
```
--- END ISSUE ---
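The traceback above ends in `value.__sentry__()` raising `TypeError` because the attribute exists but is set to `None`, so a bare `hasattr` check passes while the call still fails. A minimal sketch of the difference, using a hypothetical class rather than anything from the sentry codebase:

```
# Hypothetical class, not from the sentry codebase: the attribute is present
# on the class but is None, so it is not callable.
class Report:
    __sentry__ = None


value = Report()

print(hasattr(value, "__sentry__"))                  # True  -> calling it would raise TypeError
print(callable(getattr(value, "__sentry__", None)))  # False -> a guarded check skips the call
```

A guarded check such as `callable(getattr(value, '__sentry__', None))` therefore skips non-callable values instead of crashing.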
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry/helpers.py`
Content:
```
1 import logging
2 import sys
3 import urllib
4 import urllib2
5 import uuid
6
7 import django
8 from django.conf import settings
9 from django.utils.encoding import force_unicode
10 from django.utils.hashcompat import md5_constructor
11
12 from sentry import conf
13
14 _FILTER_CACHE = None
15 def get_filters():
16 global _FILTER_CACHE
17
18 if _FILTER_CACHE is None:
19
20 filters = []
21 for filter_ in conf.FILTERS:
22 module_name, class_name = filter_.rsplit('.', 1)
23 try:
24 module = __import__(module_name, {}, {}, class_name)
25 filter_ = getattr(module, class_name)
26 except Exception:
27 logging.exception('Unable to import %s' % (filter_,))
28 continue
29 filters.append(filter_)
30 _FILTER_CACHE = filters
31 for f in _FILTER_CACHE:
32 yield f
33
34 def get_db_engine(alias='default'):
35 has_multidb = django.VERSION >= (1, 2)
36 if has_multidb:
37 value = settings.DATABASES[alias]['ENGINE']
38 else:
39 assert alias == 'default', 'You cannot fetch a database engine other than the default on Django < 1.2'
40 value = settings.DATABASE_ENGINE
41 return value.rsplit('.', 1)[-1]
42
43 def construct_checksum(level=logging.ERROR, class_name='', traceback='', message='', **kwargs):
44 checksum = md5_constructor(str(level))
45 checksum.update(class_name or '')
46 if traceback:
47 traceback = '\n'.join(traceback.split('\n')[:-3])
48 message = traceback or message
49 if isinstance(message, unicode):
50 message = message.encode('utf-8', 'replace')
51 checksum.update(message)
52 return checksum.hexdigest()
53
54 def varmap(func, var):
55 if isinstance(var, dict):
56 return dict((k, varmap(func, v)) for k, v in var.iteritems())
57 elif isinstance(var, (list, tuple)):
58 return [varmap(func, f) for f in var]
59 else:
60 return func(var)
61
62 def transform(value):
63 # TODO: make this extendable
64 # TODO: include some sane defaults, like UUID
65 # TODO: dont coerce strings to unicode, leave them as strings
66 if isinstance(value, (tuple, list, set, frozenset)):
67 return type(value)(transform(o) for o in value)
68 elif isinstance(value, uuid.UUID):
69 return repr(value)
70 elif isinstance(value, dict):
71 return dict((k, transform(v)) for k, v in value.iteritems())
72 elif isinstance(value, unicode):
73 return to_unicode(value)
74 elif isinstance(value, str):
75 try:
76 return str(value)
77 except:
78 return to_unicode(value)
79 elif hasattr(value, '__sentry__'):
80 return value.__sentry__()
81 elif not isinstance(value, (int, bool)) and value is not None:
82 # XXX: we could do transform(repr(value)) here
83 return to_unicode(value)
84 return value
85
86 def to_unicode(value):
87 try:
88 value = unicode(force_unicode(value))
89 except (UnicodeEncodeError, UnicodeDecodeError):
90 value = '(Error decoding value)'
91 except Exception: # in some cases we get a different exception
92 try:
93 value = str(repr(type(value)))
94 except Exception:
95 value = '(Error decoding value)'
96 return value
97
98 def get_installed_apps():
99 """
100 Generate a list of modules in settings.INSTALLED_APPS.
101 """
102 out = set()
103 for app in settings.INSTALLED_APPS:
104 out.add(app)
105 return out
106
107 class _Missing(object):
108
109 def __repr__(self):
110 return 'no value'
111
112 def __reduce__(self):
113 return '_missing'
114
115 _missing = _Missing()
116
117 class cached_property(object):
118 # This is borrowed from werkzeug : http://bytebucket.org/mitsuhiko/werkzeug-main
119 """A decorator that converts a function into a lazy property. The
120 function wrapped is called the first time to retrieve the result
121 and then that calculated result is used the next time you access
122 the value::
123
124 class Foo(object):
125
126 @cached_property
127 def foo(self):
128 # calculate something important here
129 return 42
130
131 The class has to have a `__dict__` in order for this property to
132 work.
133
134 .. versionchanged:: 0.6
135 the `writeable` attribute and parameter was deprecated. If a
136 cached property is writeable or not has to be documented now.
137 For performance reasons the implementation does not honor the
138 writeable setting and will always make the property writeable.
139 """
140
141 # implementation detail: this property is implemented as non-data
142 # descriptor. non-data descriptors are only invoked if there is
143 # no entry with the same name in the instance's __dict__.
144 # this allows us to completely get rid of the access function call
145 # overhead. If one choses to invoke __get__ by hand the property
146 # will still work as expected because the lookup logic is replicated
147 # in __get__ for manual invocation.
148
149 def __init__(self, func, name=None, doc=None, writeable=False):
150 if writeable:
151 from warnings import warn
152 warn(DeprecationWarning('the writeable argument to the '
153 'cached property is a noop since 0.6 '
154 'because the property is writeable '
155 'by default for performance reasons'))
156
157 self.__name__ = name or func.__name__
158 self.__module__ = func.__module__
159 self.__doc__ = doc or func.__doc__
160 self.func = func
161
162 def __get__(self, obj, type=None):
163 if obj is None:
164 return self
165 value = obj.__dict__.get(self.__name__, _missing)
166 if value is _missing:
167 value = self.func(obj)
168 obj.__dict__[self.__name__] = value
169 return value
170
171 def urlread(url, get={}, post={}, headers={}, timeout=None):
172 req = urllib2.Request(url, urllib.urlencode(get), headers=headers)
173 try:
174 response = urllib2.urlopen(req, urllib.urlencode(post), timeout).read()
175 except:
176 response = urllib2.urlopen(req, urllib.urlencode(post)).read()
177 return response
178
179 def get_versions(module_list=None):
180 if not module_list:
181 module_list = settings.INSTALLED_APPS + ['django']
182
183 ext_module_list = set()
184 for m in module_list:
185 parts = m.split('.')
186 ext_module_list.update('.'.join(parts[:idx]) for idx in xrange(1, len(parts)+1))
187
188 versions = {}
189 for module_name in ext_module_list:
190 __import__(module_name)
191 app = sys.modules[module_name]
192 if hasattr(app, 'get_version'):
193 get_version = app.get_version
194 if callable(get_version):
195 version = get_version()
196 else:
197 version = get_version
198 elif hasattr(app, 'VERSION'):
199 version = app.VERSION
200 elif hasattr(app, '__version__'):
201 version = app.__version__
202 else:
203 continue
204 if isinstance(version, (list, tuple)):
205 version = '.'.join(str(o) for o in version)
206 versions[module_name] = version
207 return versions
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry/helpers.py b/sentry/helpers.py
--- a/sentry/helpers.py
+++ b/sentry/helpers.py
@@ -76,7 +76,7 @@
return str(value)
except:
return to_unicode(value)
- elif hasattr(value, '__sentry__'):
+ elif callable(getattr(value, '__sentry__', None)):
return value.__sentry__()
elif not isinstance(value, (int, bool)) and value is not None:
# XXX: we could do transform(repr(value)) here
|
{"golden_diff": "diff --git a/sentry/helpers.py b/sentry/helpers.py\n--- a/sentry/helpers.py\n+++ b/sentry/helpers.py\n@@ -76,7 +76,7 @@\n return str(value)\n except:\n return to_unicode(value)\n- elif hasattr(value, '__sentry__'):\n+ elif callable(getattr(value, '__sentry__', None)):\n return value.__sentry__()\n elif not isinstance(value, (int, bool)) and value is not None:\n # XXX: we could do transform(repr(value)) here\n", "issue": "logging: __sentry__ isn't always callable\nacdha@0af552b18d72594b183b18834ba7245a1ff5ade1 contains a trivial \"fix\" to avoid crashing when handling a value which has `__sentry__` set to `None` but I haven't had time to learn what `__sentry__` is supposed to do. `logging` is configured exactly as in the documentation, the code in question made a simple `logging.exception` call inside an `except` clause.\n\nHere's a traceback - it's oddly formatted because the bug in question happened inside a django-tastypie resource handler:\n\n```\nTraceback (most recent call last):\n\n File \"/Users/cadams/.virtualenvs/MyProject/lib/python2.6/site-packages/tastypie/resources.py\", line 178, in wrapper\n return getattr(self,\n view)(request,\n *args,\n **kwargs)\n\n File \"/Users/cadams/Projects/MyProject/MyProject/apps/api/resources.py\", line 133, in dispatch_notes\n return self.dispatch('notes',\n request,\n **kwargs)\n\n File \"/Users/cadams/.virtualenvs/MyProject/lib/python2.6/site-packages/tastypie/resources.py\", line 350, in dispatch\n response = method(request,\n **kwargs)\n\n File \"/Users/cadams/Projects/MyProject/MyProject/apps/api/resources.py\", line 167, in post_notes\n bundle.data)\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 1419, in exception\n error(*((msg,\n )+args),\n **\n {\n 'exc_info': 1\n }\n )\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 1412, in error\n root.error(*((msg,\n )+args),\n **kwargs)\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 1047, in error\n self._log(ERROR,\n msg,\n args,\n **kwargs)\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 1129, in _log\n self.handle(record)\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 1139, in handle\n self.callHandlers(record)\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 1176, in callHandlers\n hdlr.handle(record)\n\n File \"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/logging/__init__.py\", line 662, in handle\n self.emit(record)\n\n File \"/Users/cadams/Projects/django-sentry/sentry/client/handlers.py\", line 14, in emit\n get_client().create_from_record(record)\n\n File \"/Users/cadams/Projects/django-sentry/sentry/client/base.py\", line 157, in create_from_record\n return self.create_from_exception(record.exc_info,\n **kwargs)\n\n File \"/Users/cadams/Projects/django-sentry/sentry/client/base.py\", line 189, in create_from_exception\n frames = varmap(shorten,\n reporter.get_traceback_frames())\n\n File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 58, in varmap\n return \n [\n varmap(func,\n f) for f in var\n ]\n\n\n File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 56, in varmap\n return dict((k,\n varmap(func,\n v)) for k,\n v in var.iteritems())\n\n 
File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 56, in <genexpr>\n return dict((k,\n varmap(func,\n v)) for k,\n v in var.iteritems())\n\n File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 58, in varmap\n return \n [\n varmap(func,\n f) for f in var\n ]\n\n\n File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 58, in varmap\n return \n [\n varmap(func,\n f) for f in var\n ]\n\n\n File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 60, in varmap\n return func(var)\n\n File \"/Users/cadams/Projects/django-sentry/sentry/client/base.py\", line 183, in shorten\n var = transform(var)\n\n File \"/Users/cadams/Projects/django-sentry/sentry/helpers.py\", line 80, in transform\n return value.__sentry__()\n\nTypeError: 'NoneType' object is not callable\n```\n\n", "before_files": [{"content": "import logging\nimport sys\nimport urllib\nimport urllib2\nimport uuid\n\nimport django\nfrom django.conf import settings\nfrom django.utils.encoding import force_unicode\nfrom django.utils.hashcompat import md5_constructor\n\nfrom sentry import conf\n\n_FILTER_CACHE = None\ndef get_filters():\n global _FILTER_CACHE\n \n if _FILTER_CACHE is None:\n \n filters = []\n for filter_ in conf.FILTERS:\n module_name, class_name = filter_.rsplit('.', 1)\n try:\n module = __import__(module_name, {}, {}, class_name)\n filter_ = getattr(module, class_name)\n except Exception:\n logging.exception('Unable to import %s' % (filter_,))\n continue\n filters.append(filter_)\n _FILTER_CACHE = filters\n for f in _FILTER_CACHE:\n yield f\n\ndef get_db_engine(alias='default'):\n has_multidb = django.VERSION >= (1, 2)\n if has_multidb:\n value = settings.DATABASES[alias]['ENGINE']\n else:\n assert alias == 'default', 'You cannot fetch a database engine other than the default on Django < 1.2'\n value = settings.DATABASE_ENGINE\n return value.rsplit('.', 1)[-1]\n\ndef construct_checksum(level=logging.ERROR, class_name='', traceback='', message='', **kwargs):\n checksum = md5_constructor(str(level))\n checksum.update(class_name or '')\n if traceback:\n traceback = '\\n'.join(traceback.split('\\n')[:-3])\n message = traceback or message\n if isinstance(message, unicode):\n message = message.encode('utf-8', 'replace')\n checksum.update(message)\n return checksum.hexdigest()\n\ndef varmap(func, var):\n if isinstance(var, dict):\n return dict((k, varmap(func, v)) for k, v in var.iteritems())\n elif isinstance(var, (list, tuple)):\n return [varmap(func, f) for f in var]\n else:\n return func(var)\n\ndef transform(value):\n # TODO: make this extendable\n # TODO: include some sane defaults, like UUID\n # TODO: dont coerce strings to unicode, leave them as strings\n if isinstance(value, (tuple, list, set, frozenset)):\n return type(value)(transform(o) for o in value)\n elif isinstance(value, uuid.UUID):\n return repr(value)\n elif isinstance(value, dict):\n return dict((k, transform(v)) for k, v in value.iteritems())\n elif isinstance(value, unicode):\n return to_unicode(value)\n elif isinstance(value, str):\n try:\n return str(value)\n except:\n return to_unicode(value)\n elif hasattr(value, '__sentry__'):\n return value.__sentry__()\n elif not isinstance(value, (int, bool)) and value is not None:\n # XXX: we could do transform(repr(value)) here\n return to_unicode(value)\n return value\n\ndef to_unicode(value):\n try:\n value = unicode(force_unicode(value))\n except (UnicodeEncodeError, UnicodeDecodeError):\n value = '(Error decoding value)'\n except Exception: # in 
some cases we get a different exception\n try:\n value = str(repr(type(value)))\n except Exception:\n value = '(Error decoding value)'\n return value\n\ndef get_installed_apps():\n \"\"\"\n Generate a list of modules in settings.INSTALLED_APPS.\n \"\"\"\n out = set()\n for app in settings.INSTALLED_APPS:\n out.add(app)\n return out\n\nclass _Missing(object):\n\n def __repr__(self):\n return 'no value'\n\n def __reduce__(self):\n return '_missing'\n\n_missing = _Missing()\n\nclass cached_property(object):\n # This is borrowed from werkzeug : http://bytebucket.org/mitsuhiko/werkzeug-main\n \"\"\"A decorator that converts a function into a lazy property. The\n function wrapped is called the first time to retrieve the result\n and then that calculated result is used the next time you access\n the value::\n\n class Foo(object):\n\n @cached_property\n def foo(self):\n # calculate something important here\n return 42\n\n The class has to have a `__dict__` in order for this property to\n work.\n\n .. versionchanged:: 0.6\n the `writeable` attribute and parameter was deprecated. If a\n cached property is writeable or not has to be documented now.\n For performance reasons the implementation does not honor the\n writeable setting and will always make the property writeable.\n \"\"\"\n\n # implementation detail: this property is implemented as non-data\n # descriptor. non-data descriptors are only invoked if there is\n # no entry with the same name in the instance's __dict__.\n # this allows us to completely get rid of the access function call\n # overhead. If one choses to invoke __get__ by hand the property\n # will still work as expected because the lookup logic is replicated\n # in __get__ for manual invocation.\n\n def __init__(self, func, name=None, doc=None, writeable=False):\n if writeable:\n from warnings import warn\n warn(DeprecationWarning('the writeable argument to the '\n 'cached property is a noop since 0.6 '\n 'because the property is writeable '\n 'by default for performance reasons'))\n\n self.__name__ = name or func.__name__\n self.__module__ = func.__module__\n self.__doc__ = doc or func.__doc__\n self.func = func\n\n def __get__(self, obj, type=None):\n if obj is None:\n return self\n value = obj.__dict__.get(self.__name__, _missing)\n if value is _missing:\n value = self.func(obj)\n obj.__dict__[self.__name__] = value\n return value\n\ndef urlread(url, get={}, post={}, headers={}, timeout=None):\n req = urllib2.Request(url, urllib.urlencode(get), headers=headers)\n try:\n response = urllib2.urlopen(req, urllib.urlencode(post), timeout).read()\n except:\n response = urllib2.urlopen(req, urllib.urlencode(post)).read()\n return response\n\ndef get_versions(module_list=None):\n if not module_list:\n module_list = settings.INSTALLED_APPS + ['django']\n\n ext_module_list = set()\n for m in module_list:\n parts = m.split('.')\n ext_module_list.update('.'.join(parts[:idx]) for idx in xrange(1, len(parts)+1))\n\n versions = {}\n for module_name in ext_module_list:\n __import__(module_name)\n app = sys.modules[module_name]\n if hasattr(app, 'get_version'):\n get_version = app.get_version\n if callable(get_version):\n version = get_version()\n else:\n version = get_version\n elif hasattr(app, 'VERSION'):\n version = app.VERSION\n elif hasattr(app, '__version__'):\n version = app.__version__\n else:\n continue\n if isinstance(version, (list, tuple)):\n version = '.'.join(str(o) for o in version)\n versions[module_name] = version\n return versions", "path": "sentry/helpers.py"}], 
"after_files": [{"content": "import logging\nimport sys\nimport urllib\nimport urllib2\nimport uuid\n\nimport django\nfrom django.conf import settings\nfrom django.utils.encoding import force_unicode\nfrom django.utils.hashcompat import md5_constructor\n\nfrom sentry import conf\n\n_FILTER_CACHE = None\ndef get_filters():\n global _FILTER_CACHE\n \n if _FILTER_CACHE is None:\n \n filters = []\n for filter_ in conf.FILTERS:\n module_name, class_name = filter_.rsplit('.', 1)\n try:\n module = __import__(module_name, {}, {}, class_name)\n filter_ = getattr(module, class_name)\n except Exception:\n logging.exception('Unable to import %s' % (filter_,))\n continue\n filters.append(filter_)\n _FILTER_CACHE = filters\n for f in _FILTER_CACHE:\n yield f\n\ndef get_db_engine(alias='default'):\n has_multidb = django.VERSION >= (1, 2)\n if has_multidb:\n value = settings.DATABASES[alias]['ENGINE']\n else:\n assert alias == 'default', 'You cannot fetch a database engine other than the default on Django < 1.2'\n value = settings.DATABASE_ENGINE\n return value.rsplit('.', 1)[-1]\n\ndef construct_checksum(level=logging.ERROR, class_name='', traceback='', message='', **kwargs):\n checksum = md5_constructor(str(level))\n checksum.update(class_name or '')\n if traceback:\n traceback = '\\n'.join(traceback.split('\\n')[:-3])\n message = traceback or message\n if isinstance(message, unicode):\n message = message.encode('utf-8', 'replace')\n checksum.update(message)\n return checksum.hexdigest()\n\ndef varmap(func, var):\n if isinstance(var, dict):\n return dict((k, varmap(func, v)) for k, v in var.iteritems())\n elif isinstance(var, (list, tuple)):\n return [varmap(func, f) for f in var]\n else:\n return func(var)\n\ndef transform(value):\n # TODO: make this extendable\n # TODO: include some sane defaults, like UUID\n # TODO: dont coerce strings to unicode, leave them as strings\n if isinstance(value, (tuple, list, set, frozenset)):\n return type(value)(transform(o) for o in value)\n elif isinstance(value, uuid.UUID):\n return repr(value)\n elif isinstance(value, dict):\n return dict((k, transform(v)) for k, v in value.iteritems())\n elif isinstance(value, unicode):\n return to_unicode(value)\n elif isinstance(value, str):\n try:\n return str(value)\n except:\n return to_unicode(value)\n elif callable(getattr(value, '__sentry__', None)):\n return value.__sentry__()\n elif not isinstance(value, (int, bool)) and value is not None:\n # XXX: we could do transform(repr(value)) here\n return to_unicode(value)\n return value\n\ndef to_unicode(value):\n try:\n value = unicode(force_unicode(value))\n except (UnicodeEncodeError, UnicodeDecodeError):\n value = '(Error decoding value)'\n except Exception: # in some cases we get a different exception\n try:\n value = str(repr(type(value)))\n except Exception:\n value = '(Error decoding value)'\n return value\n\ndef get_installed_apps():\n \"\"\"\n Generate a list of modules in settings.INSTALLED_APPS.\n \"\"\"\n out = set()\n for app in settings.INSTALLED_APPS:\n out.add(app)\n return out\n\nclass _Missing(object):\n\n def __repr__(self):\n return 'no value'\n\n def __reduce__(self):\n return '_missing'\n\n_missing = _Missing()\n\nclass cached_property(object):\n # This is borrowed from werkzeug : http://bytebucket.org/mitsuhiko/werkzeug-main\n \"\"\"A decorator that converts a function into a lazy property. 
The\n function wrapped is called the first time to retrieve the result\n and then that calculated result is used the next time you access\n the value::\n\n class Foo(object):\n\n @cached_property\n def foo(self):\n # calculate something important here\n return 42\n\n The class has to have a `__dict__` in order for this property to\n work.\n\n .. versionchanged:: 0.6\n the `writeable` attribute and parameter was deprecated. If a\n cached property is writeable or not has to be documented now.\n For performance reasons the implementation does not honor the\n writeable setting and will always make the property writeable.\n \"\"\"\n\n # implementation detail: this property is implemented as non-data\n # descriptor. non-data descriptors are only invoked if there is\n # no entry with the same name in the instance's __dict__.\n # this allows us to completely get rid of the access function call\n # overhead. If one choses to invoke __get__ by hand the property\n # will still work as expected because the lookup logic is replicated\n # in __get__ for manual invocation.\n\n def __init__(self, func, name=None, doc=None, writeable=False):\n if writeable:\n from warnings import warn\n warn(DeprecationWarning('the writeable argument to the '\n 'cached property is a noop since 0.6 '\n 'because the property is writeable '\n 'by default for performance reasons'))\n\n self.__name__ = name or func.__name__\n self.__module__ = func.__module__\n self.__doc__ = doc or func.__doc__\n self.func = func\n\n def __get__(self, obj, type=None):\n if obj is None:\n return self\n value = obj.__dict__.get(self.__name__, _missing)\n if value is _missing:\n value = self.func(obj)\n obj.__dict__[self.__name__] = value\n return value\n\ndef urlread(url, get={}, post={}, headers={}, timeout=None):\n req = urllib2.Request(url, urllib.urlencode(get), headers=headers)\n try:\n response = urllib2.urlopen(req, urllib.urlencode(post), timeout).read()\n except:\n response = urllib2.urlopen(req, urllib.urlencode(post)).read()\n return response\n\ndef get_versions(module_list=None):\n if not module_list:\n module_list = settings.INSTALLED_APPS + ['django']\n\n ext_module_list = set()\n for m in module_list:\n parts = m.split('.')\n ext_module_list.update('.'.join(parts[:idx]) for idx in xrange(1, len(parts)+1))\n\n versions = {}\n for module_name in ext_module_list:\n __import__(module_name)\n app = sys.modules[module_name]\n if hasattr(app, 'get_version'):\n get_version = app.get_version\n if callable(get_version):\n version = get_version()\n else:\n version = get_version\n elif hasattr(app, 'VERSION'):\n version = app.VERSION\n elif hasattr(app, '__version__'):\n version = app.__version__\n else:\n continue\n if isinstance(version, (list, tuple)):\n version = '.'.join(str(o) for o in version)\n versions[module_name] = version\n return versions", "path": "sentry/helpers.py"}]}
| 3,500 | 117 |
gh_patches_debug_27472
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-2899
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
voting in brainstorming
the votes are shown in the pop-up for ideas within brainstorming, although there is no voting there.
<img width="332" alt="bildschirmfoto 2019-02-05 um 15 01 57" src="https://user-images.githubusercontent.com/35491681/52278354-20299380-2957-11e9-8368-dfb42c142a3a.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/newsletters/emails.py`
Content:
```
1 from email.mime.image import MIMEImage
2
3 from django.apps import apps
4 from django.conf import settings
5 from django.contrib import auth
6
7 from adhocracy4.emails.mixins import ReportToAdminEmailMixin
8 from meinberlin.apps.contrib.emails import Email
9
10 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)
11 User = auth.get_user_model()
12
13
14 class NewsletterEmail(ReportToAdminEmailMixin, Email):
15 template_name = 'meinberlin_newsletters/emails/newsletter_email'
16
17 def dispatch(self, object, *args, **kwargs):
18 organisation_pk = kwargs.pop('organisation_pk', None)
19 organisation = None
20 if organisation_pk:
21 organisation = Organisation.objects.get(pk=organisation_pk)
22 kwargs['organisation'] = organisation
23
24 return super().dispatch(object, *args, **kwargs)
25
26 def get_reply_to(self):
27 return ['{} <{}>'.format(self.object.sender_name, self.object.sender)]
28
29 def get_receivers(self):
30 return User.objects\
31 .filter(id__in=self.kwargs['participant_ids'])\
32 .filter(get_newsletters=True)\
33 .filter(is_active=True)\
34 .distinct()
35
36 def get_attachments(self):
37 attachments = super().get_attachments()
38
39 organisation = self.kwargs['organisation']
40 if organisation and organisation.logo:
41 f = open(organisation.logo.path, 'rb')
42 logo = MIMEImage(f.read())
43 logo.add_header('Content-ID', '<{}>'.format('organisation_logo'))
44 attachments += [logo]
45
46 return attachments
47
48
49 class NewsletterEmailAll(NewsletterEmail):
50
51 def get_receivers(self):
52 return User.objects\
53 .filter(is_active=True)\
54 .distinct()
55
```
Path: `meinberlin/apps/users/admin.py`
Content:
```
1 from django.contrib import admin
2 from django.contrib import auth
3 from django.contrib.auth.models import Group
4 from django.utils.translation import ugettext_lazy as _
5
6 from . import models
7 from .forms import UserAdminForm
8
9
10 class UserAdmin(auth.admin.UserAdmin):
11 form = UserAdminForm
12 fieldsets = (
13 (None, {'fields': ('username', 'email', 'password', 'groups')}),
14 (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),
15 (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
16 )
17 add_fieldsets = (
18 (None, {
19 'classes': ('wide',),
20 'fields': ('username', 'email', 'password1', 'password2'),
21 }),
22 )
23 readonly_fields = ('date_joined', 'last_login')
24 list_display = (
25 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',
26 'is_superuser'
27 )
28 list_filter = ('is_staff', 'is_superuser', 'last_login')
29 search_fields = ('username', 'email', 'id')
30
31
32 class GroupAdmin(admin.ModelAdmin):
33 fieldsets = (
34 (None, {'fields': ('name', )}),
35 )
36
37
38 admin.site.register(models.User, UserAdmin)
39 admin.site.unregister(Group)
40 admin.site.register(Group, GroupAdmin)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/newsletters/emails.py b/meinberlin/apps/newsletters/emails.py
--- a/meinberlin/apps/newsletters/emails.py
+++ b/meinberlin/apps/newsletters/emails.py
@@ -50,5 +50,6 @@
def get_receivers(self):
return User.objects\
+ .filter(get_newsletters=True)\
.filter(is_active=True)\
.distinct()
diff --git a/meinberlin/apps/users/admin.py b/meinberlin/apps/users/admin.py
--- a/meinberlin/apps/users/admin.py
+++ b/meinberlin/apps/users/admin.py
@@ -12,7 +12,8 @@
fieldsets = (
(None, {'fields': ('username', 'email', 'password', 'groups')}),
(_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),
- (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
+ (_('Important dates'),
+ {'fields': ('last_login', 'date_joined', 'get_newsletters')}),
)
add_fieldsets = (
(None, {
@@ -20,10 +21,10 @@
'fields': ('username', 'email', 'password1', 'password2'),
}),
)
- readonly_fields = ('date_joined', 'last_login')
+ readonly_fields = ('date_joined', 'last_login', 'get_newsletters')
list_display = (
'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',
- 'is_superuser'
+ 'is_superuser', 'get_newsletters'
)
list_filter = ('is_staff', 'is_superuser', 'last_login')
search_fields = ('username', 'email', 'id')
|
{"golden_diff": "diff --git a/meinberlin/apps/newsletters/emails.py b/meinberlin/apps/newsletters/emails.py\n--- a/meinberlin/apps/newsletters/emails.py\n+++ b/meinberlin/apps/newsletters/emails.py\n@@ -50,5 +50,6 @@\n \n def get_receivers(self):\n return User.objects\\\n+ .filter(get_newsletters=True)\\\n .filter(is_active=True)\\\n .distinct()\ndiff --git a/meinberlin/apps/users/admin.py b/meinberlin/apps/users/admin.py\n--- a/meinberlin/apps/users/admin.py\n+++ b/meinberlin/apps/users/admin.py\n@@ -12,7 +12,8 @@\n fieldsets = (\n (None, {'fields': ('username', 'email', 'password', 'groups')}),\n (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),\n- (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n+ (_('Important dates'),\n+ {'fields': ('last_login', 'date_joined', 'get_newsletters')}),\n )\n add_fieldsets = (\n (None, {\n@@ -20,10 +21,10 @@\n 'fields': ('username', 'email', 'password1', 'password2'),\n }),\n )\n- readonly_fields = ('date_joined', 'last_login')\n+ readonly_fields = ('date_joined', 'last_login', 'get_newsletters')\n list_display = (\n 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',\n- 'is_superuser'\n+ 'is_superuser', 'get_newsletters'\n )\n list_filter = ('is_staff', 'is_superuser', 'last_login')\n search_fields = ('username', 'email', 'id')\n", "issue": "voting in brainstorming\nthe votings are shown on pop up for ideas within brainstorming although there is no voting.\r\n\r\n<img width=\"332\" alt=\"bildschirmfoto 2019-02-05 um 15 01 57\" src=\"https://user-images.githubusercontent.com/35491681/52278354-20299380-2957-11e9-8368-dfb42c142a3a.png\">\r\n\n", "before_files": [{"content": "from email.mime.image import MIMEImage\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import auth\n\nfrom adhocracy4.emails.mixins import ReportToAdminEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\nUser = auth.get_user_model()\n\n\nclass NewsletterEmail(ReportToAdminEmailMixin, Email):\n template_name = 'meinberlin_newsletters/emails/newsletter_email'\n\n def dispatch(self, object, *args, **kwargs):\n organisation_pk = kwargs.pop('organisation_pk', None)\n organisation = None\n if organisation_pk:\n organisation = Organisation.objects.get(pk=organisation_pk)\n kwargs['organisation'] = organisation\n\n return super().dispatch(object, *args, **kwargs)\n\n def get_reply_to(self):\n return ['{} <{}>'.format(self.object.sender_name, self.object.sender)]\n\n def get_receivers(self):\n return User.objects\\\n .filter(id__in=self.kwargs['participant_ids'])\\\n .filter(get_newsletters=True)\\\n .filter(is_active=True)\\\n .distinct()\n\n def get_attachments(self):\n attachments = super().get_attachments()\n\n organisation = self.kwargs['organisation']\n if organisation and organisation.logo:\n f = open(organisation.logo.path, 'rb')\n logo = MIMEImage(f.read())\n logo.add_header('Content-ID', '<{}>'.format('organisation_logo'))\n attachments += [logo]\n\n return attachments\n\n\nclass NewsletterEmailAll(NewsletterEmail):\n\n def get_receivers(self):\n return User.objects\\\n .filter(is_active=True)\\\n .distinct()\n", "path": "meinberlin/apps/newsletters/emails.py"}, {"content": "from django.contrib import admin\nfrom django.contrib import auth\nfrom django.contrib.auth.models import Group\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . 
import models\nfrom .forms import UserAdminForm\n\n\nclass UserAdmin(auth.admin.UserAdmin):\n form = UserAdminForm\n fieldsets = (\n (None, {'fields': ('username', 'email', 'password', 'groups')}),\n (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),\n (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('username', 'email', 'password1', 'password2'),\n }),\n )\n readonly_fields = ('date_joined', 'last_login')\n list_display = (\n 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',\n 'is_superuser'\n )\n list_filter = ('is_staff', 'is_superuser', 'last_login')\n search_fields = ('username', 'email', 'id')\n\n\nclass GroupAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('name', )}),\n )\n\n\nadmin.site.register(models.User, UserAdmin)\nadmin.site.unregister(Group)\nadmin.site.register(Group, GroupAdmin)\n", "path": "meinberlin/apps/users/admin.py"}], "after_files": [{"content": "from email.mime.image import MIMEImage\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import auth\n\nfrom adhocracy4.emails.mixins import ReportToAdminEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\nUser = auth.get_user_model()\n\n\nclass NewsletterEmail(ReportToAdminEmailMixin, Email):\n template_name = 'meinberlin_newsletters/emails/newsletter_email'\n\n def dispatch(self, object, *args, **kwargs):\n organisation_pk = kwargs.pop('organisation_pk', None)\n organisation = None\n if organisation_pk:\n organisation = Organisation.objects.get(pk=organisation_pk)\n kwargs['organisation'] = organisation\n\n return super().dispatch(object, *args, **kwargs)\n\n def get_reply_to(self):\n return ['{} <{}>'.format(self.object.sender_name, self.object.sender)]\n\n def get_receivers(self):\n return User.objects\\\n .filter(id__in=self.kwargs['participant_ids'])\\\n .filter(get_newsletters=True)\\\n .filter(is_active=True)\\\n .distinct()\n\n def get_attachments(self):\n attachments = super().get_attachments()\n\n organisation = self.kwargs['organisation']\n if organisation and organisation.logo:\n f = open(organisation.logo.path, 'rb')\n logo = MIMEImage(f.read())\n logo.add_header('Content-ID', '<{}>'.format('organisation_logo'))\n attachments += [logo]\n\n return attachments\n\n\nclass NewsletterEmailAll(NewsletterEmail):\n\n def get_receivers(self):\n return User.objects\\\n .filter(get_newsletters=True)\\\n .filter(is_active=True)\\\n .distinct()\n", "path": "meinberlin/apps/newsletters/emails.py"}, {"content": "from django.contrib import admin\nfrom django.contrib import auth\nfrom django.contrib.auth.models import Group\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . 
import models\nfrom .forms import UserAdminForm\n\n\nclass UserAdmin(auth.admin.UserAdmin):\n form = UserAdminForm\n fieldsets = (\n (None, {'fields': ('username', 'email', 'password', 'groups')}),\n (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),\n (_('Important dates'),\n {'fields': ('last_login', 'date_joined', 'get_newsletters')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('username', 'email', 'password1', 'password2'),\n }),\n )\n readonly_fields = ('date_joined', 'last_login', 'get_newsletters')\n list_display = (\n 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',\n 'is_superuser', 'get_newsletters'\n )\n list_filter = ('is_staff', 'is_superuser', 'last_login')\n search_fields = ('username', 'email', 'id')\n\n\nclass GroupAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('name', )}),\n )\n\n\nadmin.site.register(models.User, UserAdmin)\nadmin.site.unregister(Group)\nadmin.site.register(Group, GroupAdmin)\n", "path": "meinberlin/apps/users/admin.py"}]}
| 1,236 | 401 |
gh_patches_debug_47456 | rasdani/github-patches | git_diff | awslabs__gluonts-536 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Binned distribution gives wrong quantiles if the probability of first bin is nonzero
## Description
While expanding bin centers, [the code](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L154) incorrectly adds the probabilities of the first bin to the bin centers ([`zeros_cdf` has the probabilities of the initial bin for all batch elements/time points](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L138)).
Also the [index](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L146) returned is always zero and hence the quantile returned is the first bin + probability of the first bin, unless the initial bin probability is zero which is always the case in [tests](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/test/distribution/test_distribution_sampling.py#L84).
## To Reproduce
```
In [1]: import mxnet as mx
In [2]: from gluonts.distribution import Binned
In [3]: binned = Binned(bin_probs=mx.nd.array([[0.5, 0.2, 0.15, 0.15]]), bin_centers=mx.nd.array([[1e-4, 1e-3, 1e-2, 1e-1]]))
In [4]: binned.quantile(mx.nd.array([0.1, 0.5, 0.7, 0.9, 0.999]))
Out[4]:
[[0.5001]
[0.5001]
[0.5001]
[0.5001]
[0.5001]]
<NDArray 5x1 @cpu(0)>
```
## Fix
Replacing `broadcast_add` by `broadcast_mul` [here](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L138) seems to solve both problems. I think `zeros_cdf` was meant to be zeros not the probabilities of the first bin.
After fix:
```
In [4]: binned.quantile(mx.nd.array([0.1, 0.5, 0.7, 0.9, 0.999]))
Out[4]:
[[1.e-04]
[1.e-03]
[1.e-02]
[1.e-01]
[1.e-01]]
<NDArray 5x1 @cpu(0)>
```
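A self-contained sketch of what goes wrong and why the proposed change fixes it, using the toy numbers from the reproduction above (mxnet assumed available; `F.zeros_like` on the sliced tensor would work just as well as `broadcast_mul`):

```python
import mxnet as mx

F = mx.nd
bin_probs = mx.nd.array([[0.5, 0.2, 0.15, 0.15]])   # (batch=1, num_bins=4)
level = mx.nd.array([0.1, 0.5, 0.7, 0.9, 0.999])

probs = bin_probs.swapaxes(0, 1)                     # (num_bins, batch)
first_bin = F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(axis=0)  # (batch,)
level_row = level.expand_dims(axis=0)                # (1, num_levels)

# current code: the running CDF starts at the first bin's probability (0.5 here)
buggy_cdf = F.broadcast_add(first_bin.expand_dims(axis=1), level_row.zeros_like())

# proposed fix: multiply instead of add, so the accumulator genuinely starts at zero
fixed_cdf = F.broadcast_mul(first_bin.expand_dims(axis=1), level_row.zeros_like())

print(buggy_cdf)  # values: [[0.5 0.5 0.5 0.5 0.5]], every quantile search starts half done
print(fixed_cdf)  # values: [[0. 0. 0. 0. 0.]]
```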
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/distribution/binned.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 # Standard library imports
15 from typing import Tuple, List
16
17 # Third-party imports
18 import mxnet as mx
19 from mxnet import gluon
20 import numpy as np
21
22 # First-party imports
23 from gluonts.core.component import validated
24 from gluonts.model.common import Tensor
25
26 # Relative imports
27 from .distribution import Distribution, _sample_multiple, getF
28 from .distribution_output import DistributionOutput
29
30
31 class Binned(Distribution):
32 r"""
33 A binned distribution defined by a set of bins via
34 bin centers and bin probabilities.
35
36 Parameters
37 ----------
38 bin_probs
39 Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.
40 bin_centers
41 Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.
42 F
43 """
44
45 is_reparameterizable = False
46
47 @validated()
48 def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:
49 self.bin_centers = bin_centers
50 self.bin_probs = bin_probs
51 self.F = F if F else getF(bin_probs)
52
53 self.bin_edges = Binned._compute_edges(self.F, bin_centers)
54
55 @staticmethod
56 def _compute_edges(F, bin_centers: Tensor) -> Tensor:
57 r"""
58 Computes the edges of the bins based on the centers. The first and last edge are set to :math:`10^{-10}` and
59 :math:`10^{10}`, repsectively.
60
61 Parameters
62 ----------
63 F
64 bin_centers
65 Tensor of shape `(*batch_shape, num_bins)`.
66
67 Returns
68 -------
69 Tensor
70 Tensor of shape (*batch.shape, num_bins+1)
71 """
72
73 low = (
74 F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
75 - 1.0e10
76 )
77 high = (
78 F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
79 + 1.0e10
80 )
81
82 means = (
83 bin_centers.slice_axis(axis=-1, begin=1, end=None)
84 + bin_centers.slice_axis(axis=-1, begin=0, end=-1)
85 ) / 2.0
86
87 return F.concat(low, means, high, dim=-1)
88
89 @property
90 def batch_shape(self) -> Tuple:
91 return self.bin_probs.shape[:-1]
92
93 @property
94 def event_shape(self) -> Tuple:
95 return ()
96
97 @property
98 def event_dim(self) -> int:
99 return 0
100
101 @property
102 def mean(self):
103 return (self.bin_probs * self.bin_centers).sum(axis=-1)
104
105 @property
106 def stddev(self):
107 Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)
108 return (Ex2 - self.mean.square()).sqrt()
109
110 def log_prob(self, x):
111 F = self.F
112 x = x.expand_dims(axis=-1)
113 # TODO: when mxnet has searchsorted replace this
114 left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)
115 right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)
116 mask = F.broadcast_lesser_equal(left_edges, x) * F.broadcast_lesser(
117 x, right_edges
118 )
119 return F.broadcast_mul(self.bin_probs.log(), mask).sum(axis=-1)
120
121 def cdf(self, x: Tensor) -> Tensor:
122 F = self.F
123 x = x.expand_dims(axis=-1)
124 # left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)
125 mask = F.broadcast_lesser_equal(self.bin_centers, x)
126 return F.broadcast_mul(self.bin_probs, mask).sum(axis=-1)
127
128 def quantile(self, level: Tensor) -> Tensor:
129 F = self.F
130
131 probs = self.bin_probs.swapaxes(0, 1) # (num_bins, batch)
132 zeros_batch_size = F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(
133 axis=0
134 ) # (batch_size,)
135
136 level = level.expand_dims(axis=0)
137 # cdf shape (batch_size, levels)
138 zeros_cdf = F.broadcast_add(
139 zeros_batch_size.expand_dims(axis=1), level.zeros_like()
140 )
141 start_state = (zeros_cdf, zeros_cdf.astype("int32"))
142
143 def step(p, state):
144 cdf, idx = state
145 cdf = F.broadcast_add(cdf, p.expand_dims(axis=1))
146 idx = F.where(F.broadcast_greater(cdf, level), idx, idx + 1)
147 return zeros_batch_size, (cdf, idx)
148
149 _, states = F.contrib.foreach(step, probs, start_state)
150 _, idx = states
151
152 # expand centers to shape (batch, levels, num_bins)
153 # so we can use pick with idx.shape = (batch, levels)
154 centers_expanded = F.broadcast_add(
155 self.bin_centers.expand_dims(axis=1),
156 zeros_cdf.expand_dims(axis=-1),
157 )
158 a = centers_expanded.pick(idx, axis=-1)
159 return a.swapaxes(0, 1)
160
161 def sample(self, num_samples=None, dtype=np.float32):
162 def s(bin_probs):
163 F = self.F
164 indices = F.sample_multinomial(bin_probs)
165 if num_samples is None:
166 return self.bin_centers.pick(indices, -1).reshape_like(
167 F.zeros_like(indices.astype("float32"))
168 )
169 else:
170 return F.repeat(
171 F.expand_dims(self.bin_centers, axis=0),
172 repeats=num_samples,
173 axis=0,
174 ).pick(indices, -1)
175
176 return _sample_multiple(s, self.bin_probs, num_samples=num_samples)
177
178 @property
179 def args(self) -> List:
180 return [self.bin_probs, self.bin_centers]
181
182
183 class BinnedArgs(gluon.HybridBlock):
184 def __init__(
185 self, num_bins: int, bin_centers: mx.nd.NDArray, **kwargs
186 ) -> None:
187 super().__init__(**kwargs)
188 self.num_bins = num_bins
189 with self.name_scope():
190 self.bin_centers = self.params.get_constant(
191 "bin_centers", bin_centers
192 )
193
194 # needs to be named self.proj for consistency with the
195 # ArgProj class and the inference tests
196 self.proj = gluon.nn.HybridSequential()
197 self.proj.add(
198 gluon.nn.Dense(
199 self.num_bins,
200 prefix="binproj",
201 flatten=False,
202 weight_initializer=mx.init.Xavier(),
203 )
204 )
205 self.proj.add(gluon.nn.HybridLambda("softmax"))
206
207 def hybrid_forward(
208 self, F, x: Tensor, bin_centers: Tensor
209 ) -> Tuple[Tensor, Tensor]:
210 ps = self.proj(x)
211 reshaped_probs = ps.reshape(shape=(-2, -1, self.num_bins), reverse=1)
212 bin_centers = F.broadcast_add(bin_centers, ps.zeros_like())
213 return reshaped_probs, bin_centers
214
215
216 class BinnedOutput(DistributionOutput):
217 distr_cls: type = Binned
218
219 @validated()
220 def __init__(self, bin_centers: mx.nd.NDArray) -> None:
221 self.bin_centers = bin_centers
222 self.num_bins = self.bin_centers.shape[0]
223 assert len(self.bin_centers.shape) == 1
224
225 def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:
226 return BinnedArgs(self.num_bins, self.bin_centers)
227
228 def distribution(self, args, scale=None) -> Binned:
229 probs = args[0]
230 bin_centers = args[1]
231 F = getF(probs)
232
233 bin_centers = F.broadcast_mul(bin_centers, F.ones_like(probs))
234
235 if scale is not None:
236 bin_centers = F.broadcast_mul(
237 bin_centers, scale.expand_dims(axis=-1)
238 )
239
240 return Binned(probs, bin_centers)
241
242 @property
243 def event_shape(self) -> Tuple:
244 return ()
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/gluonts/distribution/binned.py b/src/gluonts/distribution/binned.py
--- a/src/gluonts/distribution/binned.py
+++ b/src/gluonts/distribution/binned.py
@@ -129,9 +129,10 @@
F = self.F
probs = self.bin_probs.swapaxes(0, 1) # (num_bins, batch)
- zeros_batch_size = F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(
- axis=0
- ) # (batch_size,)
+ # (batch_size,)
+ zeros_batch_size = F.zeros_like(
+ F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(axis=0)
+ )
level = level.expand_dims(axis=0)
# cdf shape (batch_size, levels)
|
{"golden_diff": "diff --git a/src/gluonts/distribution/binned.py b/src/gluonts/distribution/binned.py\n--- a/src/gluonts/distribution/binned.py\n+++ b/src/gluonts/distribution/binned.py\n@@ -129,9 +129,10 @@\n F = self.F\n \n probs = self.bin_probs.swapaxes(0, 1) # (num_bins, batch)\n- zeros_batch_size = F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(\n- axis=0\n- ) # (batch_size,)\n+ # (batch_size,)\n+ zeros_batch_size = F.zeros_like(\n+ F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(axis=0)\n+ )\n \n level = level.expand_dims(axis=0)\n # cdf shape (batch_size, levels)\n", "issue": "Binned distribution gives wrong quantiles if the probability of first bin is nonzero\n## Description\r\nWhile expanding bin centers, [the code](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L154) incorrectly adds the probabilities of the first bin to the bin centers ([`zeros_cdf` has the probabilities of the initial bin for all batch elements/time points](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L138)).\r\n\r\nAlso the [index](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L146) returned is always zero and hence the quantile returned is the first bin + probability of the first bin, unless the initial bin probability is zero which is always the case in [tests](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/test/distribution/test_distribution_sampling.py#L84).\r\n\r\n## To Reproduce\r\n```\r\nIn [1]: import mxnet as mx \r\nfrom\r\nIn [2]: from gluonts.distribution import Binned \r\n\r\nIn [3]: binned = Binned(bin_probs=mx.nd.array([[0.5, 0.2, 0.15, 0.15]]), bin_centers=mx.nd.array([[1e-4, 1e-3, 1e-2, 1e-1]])) \r\n\r\nIn [4]: binned.quantile(mx.nd.array([0.1, 0.5, 0.7, 0.9, 0.999])) \r\nOut[4]: \r\n\r\n[[0.5001]\r\n [0.5001]\r\n [0.5001]\r\n [0.5001]\r\n [0.5001]]\r\n<NDArray 5x1 @cpu(0)>\r\n\r\n```\r\n## Fix\r\nReplacing `broadcast_add` by `broadcast_mul` [here](https://github.com/awslabs/gluon-ts/blob/a9cde7bf92178b4cf8010a4c8600940b595f2ae3/src/gluonts/distribution/binned.py#L138) seems to solve both problems. I think `zeros_cdf` was meant to be zeros not the probabilities of the first bin.\r\n\r\nAfter fix:\r\n\r\n```\r\nIn [4]: binned.quantile(mx.nd.array([0.1, 0.5, 0.7, 0.9, 0.999])) \r\nOut[4]: \r\n\r\n[[1.e-04]\r\n [1.e-03]\r\n [1.e-02]\r\n [1.e-01]\r\n [1.e-01]]\r\n<NDArray 5x1 @cpu(0)>\r\n\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import Tuple, List\n\n# Third-party imports\nimport mxnet as mx\nfrom mxnet import gluon\nimport numpy as np\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.model.common import Tensor\n\n# Relative imports\nfrom .distribution import Distribution, _sample_multiple, getF\nfrom .distribution_output import DistributionOutput\n\n\nclass Binned(Distribution):\n r\"\"\"\n A binned distribution defined by a set of bins via\n bin centers and bin probabilities.\n\n Parameters\n ----------\n bin_probs\n Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.\n bin_centers\n Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n @validated()\n def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:\n self.bin_centers = bin_centers\n self.bin_probs = bin_probs\n self.F = F if F else getF(bin_probs)\n\n self.bin_edges = Binned._compute_edges(self.F, bin_centers)\n\n @staticmethod\n def _compute_edges(F, bin_centers: Tensor) -> Tensor:\n r\"\"\"\n Computes the edges of the bins based on the centers. The first and last edge are set to :math:`10^{-10}` and\n :math:`10^{10}`, repsectively.\n\n Parameters\n ----------\n F\n bin_centers\n Tensor of shape `(*batch_shape, num_bins)`.\n\n Returns\n -------\n Tensor\n Tensor of shape (*batch.shape, num_bins+1)\n \"\"\"\n\n low = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n - 1.0e10\n )\n high = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n + 1.0e10\n )\n\n means = (\n bin_centers.slice_axis(axis=-1, begin=1, end=None)\n + bin_centers.slice_axis(axis=-1, begin=0, end=-1)\n ) / 2.0\n\n return F.concat(low, means, high, dim=-1)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.bin_probs.shape[:-1]\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n @property\n def mean(self):\n return (self.bin_probs * self.bin_centers).sum(axis=-1)\n\n @property\n def stddev(self):\n Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)\n return (Ex2 - self.mean.square()).sqrt()\n\n def log_prob(self, x):\n F = self.F\n x = x.expand_dims(axis=-1)\n # TODO: when mxnet has searchsorted replace this\n left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)\n mask = F.broadcast_lesser_equal(left_edges, x) * F.broadcast_lesser(\n x, right_edges\n )\n return F.broadcast_mul(self.bin_probs.log(), mask).sum(axis=-1)\n\n def cdf(self, x: Tensor) -> Tensor:\n F = self.F\n x = x.expand_dims(axis=-1)\n # left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n mask = F.broadcast_lesser_equal(self.bin_centers, x)\n return F.broadcast_mul(self.bin_probs, mask).sum(axis=-1)\n\n def quantile(self, level: Tensor) -> Tensor:\n F = self.F\n\n probs = self.bin_probs.swapaxes(0, 1) # (num_bins, batch)\n zeros_batch_size = F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(\n axis=0\n ) # (batch_size,)\n\n level = level.expand_dims(axis=0)\n # cdf shape (batch_size, levels)\n zeros_cdf = F.broadcast_add(\n zeros_batch_size.expand_dims(axis=1), level.zeros_like()\n )\n start_state = (zeros_cdf, zeros_cdf.astype(\"int32\"))\n\n def step(p, state):\n cdf, idx = state\n cdf = F.broadcast_add(cdf, 
p.expand_dims(axis=1))\n idx = F.where(F.broadcast_greater(cdf, level), idx, idx + 1)\n return zeros_batch_size, (cdf, idx)\n\n _, states = F.contrib.foreach(step, probs, start_state)\n _, idx = states\n\n # expand centers to shape (batch, levels, num_bins)\n # so we can use pick with idx.shape = (batch, levels)\n centers_expanded = F.broadcast_add(\n self.bin_centers.expand_dims(axis=1),\n zeros_cdf.expand_dims(axis=-1),\n )\n a = centers_expanded.pick(idx, axis=-1)\n return a.swapaxes(0, 1)\n\n def sample(self, num_samples=None, dtype=np.float32):\n def s(bin_probs):\n F = self.F\n indices = F.sample_multinomial(bin_probs)\n if num_samples is None:\n return self.bin_centers.pick(indices, -1).reshape_like(\n F.zeros_like(indices.astype(\"float32\"))\n )\n else:\n return F.repeat(\n F.expand_dims(self.bin_centers, axis=0),\n repeats=num_samples,\n axis=0,\n ).pick(indices, -1)\n\n return _sample_multiple(s, self.bin_probs, num_samples=num_samples)\n\n @property\n def args(self) -> List:\n return [self.bin_probs, self.bin_centers]\n\n\nclass BinnedArgs(gluon.HybridBlock):\n def __init__(\n self, num_bins: int, bin_centers: mx.nd.NDArray, **kwargs\n ) -> None:\n super().__init__(**kwargs)\n self.num_bins = num_bins\n with self.name_scope():\n self.bin_centers = self.params.get_constant(\n \"bin_centers\", bin_centers\n )\n\n # needs to be named self.proj for consistency with the\n # ArgProj class and the inference tests\n self.proj = gluon.nn.HybridSequential()\n self.proj.add(\n gluon.nn.Dense(\n self.num_bins,\n prefix=\"binproj\",\n flatten=False,\n weight_initializer=mx.init.Xavier(),\n )\n )\n self.proj.add(gluon.nn.HybridLambda(\"softmax\"))\n\n def hybrid_forward(\n self, F, x: Tensor, bin_centers: Tensor\n ) -> Tuple[Tensor, Tensor]:\n ps = self.proj(x)\n reshaped_probs = ps.reshape(shape=(-2, -1, self.num_bins), reverse=1)\n bin_centers = F.broadcast_add(bin_centers, ps.zeros_like())\n return reshaped_probs, bin_centers\n\n\nclass BinnedOutput(DistributionOutput):\n distr_cls: type = Binned\n\n @validated()\n def __init__(self, bin_centers: mx.nd.NDArray) -> None:\n self.bin_centers = bin_centers\n self.num_bins = self.bin_centers.shape[0]\n assert len(self.bin_centers.shape) == 1\n\n def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:\n return BinnedArgs(self.num_bins, self.bin_centers)\n\n def distribution(self, args, scale=None) -> Binned:\n probs = args[0]\n bin_centers = args[1]\n F = getF(probs)\n\n bin_centers = F.broadcast_mul(bin_centers, F.ones_like(probs))\n\n if scale is not None:\n bin_centers = F.broadcast_mul(\n bin_centers, scale.expand_dims(axis=-1)\n )\n\n return Binned(probs, bin_centers)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n", "path": "src/gluonts/distribution/binned.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import Tuple, List\n\n# Third-party imports\nimport mxnet as mx\nfrom mxnet import gluon\nimport numpy as np\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.model.common import Tensor\n\n# Relative imports\nfrom .distribution import Distribution, _sample_multiple, getF\nfrom .distribution_output import DistributionOutput\n\n\nclass Binned(Distribution):\n r\"\"\"\n A binned distribution defined by a set of bins via\n bin centers and bin probabilities.\n\n Parameters\n ----------\n bin_probs\n Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.\n bin_centers\n Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n @validated()\n def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:\n self.bin_centers = bin_centers\n self.bin_probs = bin_probs\n self.F = F if F else getF(bin_probs)\n\n self.bin_edges = Binned._compute_edges(self.F, bin_centers)\n\n @staticmethod\n def _compute_edges(F, bin_centers: Tensor) -> Tensor:\n r\"\"\"\n Computes the edges of the bins based on the centers. The first and last edge are set to :math:`10^{-10}` and\n :math:`10^{10}`, repsectively.\n\n Parameters\n ----------\n F\n bin_centers\n Tensor of shape `(*batch_shape, num_bins)`.\n\n Returns\n -------\n Tensor\n Tensor of shape (*batch.shape, num_bins+1)\n \"\"\"\n\n low = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n - 1.0e10\n )\n high = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n + 1.0e10\n )\n\n means = (\n bin_centers.slice_axis(axis=-1, begin=1, end=None)\n + bin_centers.slice_axis(axis=-1, begin=0, end=-1)\n ) / 2.0\n\n return F.concat(low, means, high, dim=-1)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.bin_probs.shape[:-1]\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n @property\n def mean(self):\n return (self.bin_probs * self.bin_centers).sum(axis=-1)\n\n @property\n def stddev(self):\n Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)\n return (Ex2 - self.mean.square()).sqrt()\n\n def log_prob(self, x):\n F = self.F\n x = x.expand_dims(axis=-1)\n # TODO: when mxnet has searchsorted replace this\n left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)\n mask = F.broadcast_lesser_equal(left_edges, x) * F.broadcast_lesser(\n x, right_edges\n )\n return F.broadcast_mul(self.bin_probs.log(), mask).sum(axis=-1)\n\n def cdf(self, x: Tensor) -> Tensor:\n F = self.F\n x = x.expand_dims(axis=-1)\n # left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n mask = F.broadcast_lesser_equal(self.bin_centers, x)\n return F.broadcast_mul(self.bin_probs, mask).sum(axis=-1)\n\n def quantile(self, level: Tensor) -> Tensor:\n F = self.F\n\n probs = self.bin_probs.swapaxes(0, 1) # (num_bins, batch)\n # (batch_size,)\n zeros_batch_size = F.zeros_like(\n F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(axis=0)\n )\n\n level = level.expand_dims(axis=0)\n # cdf shape (batch_size, levels)\n zeros_cdf = F.broadcast_add(\n zeros_batch_size.expand_dims(axis=1), level.zeros_like()\n )\n start_state = (zeros_cdf, zeros_cdf.astype(\"int32\"))\n\n def step(p, state):\n cdf, idx = state\n cdf = 
F.broadcast_add(cdf, p.expand_dims(axis=1))\n idx = F.where(F.broadcast_greater(cdf, level), idx, idx + 1)\n return zeros_batch_size, (cdf, idx)\n\n _, states = F.contrib.foreach(step, probs, start_state)\n _, idx = states\n\n # expand centers to shape (batch, levels, num_bins)\n # so we can use pick with idx.shape = (batch, levels)\n centers_expanded = F.broadcast_add(\n self.bin_centers.expand_dims(axis=1),\n zeros_cdf.expand_dims(axis=-1),\n )\n a = centers_expanded.pick(idx, axis=-1)\n return a.swapaxes(0, 1)\n\n def sample(self, num_samples=None, dtype=np.float32):\n def s(bin_probs):\n F = self.F\n indices = F.sample_multinomial(bin_probs)\n if num_samples is None:\n return self.bin_centers.pick(indices, -1).reshape_like(\n F.zeros_like(indices.astype(\"float32\"))\n )\n else:\n return F.repeat(\n F.expand_dims(self.bin_centers, axis=0),\n repeats=num_samples,\n axis=0,\n ).pick(indices, -1)\n\n return _sample_multiple(s, self.bin_probs, num_samples=num_samples)\n\n @property\n def args(self) -> List:\n return [self.bin_probs, self.bin_centers]\n\n\nclass BinnedArgs(gluon.HybridBlock):\n def __init__(\n self, num_bins: int, bin_centers: mx.nd.NDArray, **kwargs\n ) -> None:\n super().__init__(**kwargs)\n self.num_bins = num_bins\n with self.name_scope():\n self.bin_centers = self.params.get_constant(\n \"bin_centers\", bin_centers\n )\n\n # needs to be named self.proj for consistency with the\n # ArgProj class and the inference tests\n self.proj = gluon.nn.HybridSequential()\n self.proj.add(\n gluon.nn.Dense(\n self.num_bins,\n prefix=\"binproj\",\n flatten=False,\n weight_initializer=mx.init.Xavier(),\n )\n )\n self.proj.add(gluon.nn.HybridLambda(\"softmax\"))\n\n def hybrid_forward(\n self, F, x: Tensor, bin_centers: Tensor\n ) -> Tuple[Tensor, Tensor]:\n ps = self.proj(x)\n reshaped_probs = ps.reshape(shape=(-2, -1, self.num_bins), reverse=1)\n bin_centers = F.broadcast_add(bin_centers, ps.zeros_like())\n return reshaped_probs, bin_centers\n\n\nclass BinnedOutput(DistributionOutput):\n distr_cls: type = Binned\n\n @validated()\n def __init__(self, bin_centers: mx.nd.NDArray) -> None:\n self.bin_centers = bin_centers\n self.num_bins = self.bin_centers.shape[0]\n assert len(self.bin_centers.shape) == 1\n\n def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:\n return BinnedArgs(self.num_bins, self.bin_centers)\n\n def distribution(self, args, scale=None) -> Binned:\n probs = args[0]\n bin_centers = args[1]\n F = getF(probs)\n\n bin_centers = F.broadcast_mul(bin_centers, F.ones_like(probs))\n\n if scale is not None:\n bin_centers = F.broadcast_mul(\n bin_centers, scale.expand_dims(axis=-1)\n )\n\n return Binned(probs, bin_centers)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n", "path": "src/gluonts/distribution/binned.py"}]}
| 3,593 | 203 |
gh_patches_debug_20696 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-887 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tests.internal.runtime.test_runtime_metrics.TestRuntimeWorker.test_worker_metrics fails randomly
```
def test_worker_metrics(self):
self.tracer.configure(collect_metrics=True)
with self.override_global_tracer(self.tracer):
self.tracer._dogstatsd_client = DogStatsd()
self.tracer._dogstatsd_client.socket = FakeSocket()
root = self.start_span('parent', service='parent')
context = root.context
self.start_span('child', service='child', child_of=context)
self.worker = RuntimeWorker(self.tracer._dogstatsd_client)
self.worker.start()
self.worker.stop()
# get all received metrics
received = []
while True:
new = self.tracer._dogstatsd_client.socket.recv()
if not new:
break
received.append(new)
# DEV: sleep since metrics will still be getting collected and written
time.sleep(.5)
# expect received all default metrics
> self.assertEqual(len(received), len(DEFAULT_RUNTIME_METRICS))
E AssertionError: 0 != 10
tests/internal/runtime/test_runtime_metrics.py:75: AssertionError
```
https://circleci.com/gh/DataDog/dd-trace-py/114364
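The assertion races against the background thread: metrics are only written every `FLUSH_INTERVAL` (10 s by default) and `stop()` merely flips `_stay_alive`, so the test can read the fake socket before anything was flushed. A sketch of a deterministic version of the test body, assuming the worker gains a `join()` and keeps a usable `flush_interval` (both appear in the patch below); names follow the test above:

```python
# sketch only: join() comes from the patch below, the rest mirrors the failing test
self.worker = RuntimeWorker(self.tracer._dogstatsd_client, flush_interval=0.01)
self.worker.flush()                # write one round of metrics synchronously

self.worker.start()                # also exercise the thread itself
self.worker.stop()
self.worker.join(timeout=5)        # wait for the thread instead of time.sleep(.5)

received = []
while True:
    new = self.tracer._dogstatsd_client.socket.recv()
    if not new:
        break
    received.append(new)

# at least one full round of default metrics must have arrived
self.assertTrue(len(received) >= len(DEFAULT_RUNTIME_METRICS))
```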
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/internal/runtime/runtime_metrics.py`
Content:
```
1 import threading
2 import time
3 import itertools
4
5 from ..logger import get_logger
6 from .constants import (
7 DEFAULT_RUNTIME_METRICS,
8 DEFAULT_RUNTIME_TAGS,
9 )
10 from .metric_collectors import (
11 GCRuntimeMetricCollector,
12 PSUtilRuntimeMetricCollector,
13 )
14 from .tag_collectors import (
15 TracerTagCollector,
16 )
17
18 log = get_logger(__name__)
19
20
21 class RuntimeCollectorsIterable(object):
22 def __init__(self, enabled=None):
23 self._enabled = enabled or self.ENABLED
24 # Initialize the collectors.
25 self._collectors = [c() for c in self.COLLECTORS]
26
27 def __iter__(self):
28 collected = (
29 collector.collect(self._enabled)
30 for collector in self._collectors
31 )
32 return itertools.chain.from_iterable(collected)
33
34 def __repr__(self):
35 return '{}(enabled={})'.format(
36 self.__class__.__name__,
37 self._enabled,
38 )
39
40
41 class RuntimeTags(RuntimeCollectorsIterable):
42 ENABLED = DEFAULT_RUNTIME_TAGS
43 COLLECTORS = [
44 TracerTagCollector,
45 ]
46
47
48 class RuntimeMetrics(RuntimeCollectorsIterable):
49 ENABLED = DEFAULT_RUNTIME_METRICS
50 COLLECTORS = [
51 GCRuntimeMetricCollector,
52 PSUtilRuntimeMetricCollector,
53 ]
54
55
56 class RuntimeWorker(object):
57 """ Worker thread for collecting and writing runtime metrics to a DogStatsd
58 client.
59 """
60
61 FLUSH_INTERVAL = 10
62
63 def __init__(self, statsd_client, flush_interval=None):
64 self._stay_alive = None
65 self._thread = None
66 self._flush_interval = flush_interval or self.FLUSH_INTERVAL
67 self._statsd_client = statsd_client
68 self._runtime_metrics = RuntimeMetrics()
69
70 def _target(self):
71 while self._stay_alive:
72 self.flush()
73 time.sleep(self._flush_interval)
74
75 def start(self):
76 if not self._thread:
77 log.debug('Starting {}'.format(self))
78 self._stay_alive = True
79 self._thread = threading.Thread(target=self._target)
80 self._thread.setDaemon(True)
81 self._thread.start()
82
83 def stop(self):
84 if self._thread and self._stay_alive:
85 log.debug('Stopping {}'.format(self))
86 self._stay_alive = False
87
88 def _write_metric(self, key, value):
89 log.debug('Writing metric {}:{}'.format(key, value))
90 self._statsd_client.gauge(key, value)
91
92 def flush(self):
93 if not self._statsd_client:
94 log.warn('Attempted flush with uninitialized or failed statsd client')
95 return
96
97 for key, value in self._runtime_metrics:
98 self._write_metric(key, value)
99
100 def reset(self):
101 self._runtime_metrics = RuntimeMetrics()
102
103 def __repr__(self):
104 return '{}(runtime_metrics={})'.format(
105 self.__class__.__name__,
106 self._runtime_metrics,
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/internal/runtime/runtime_metrics.py b/ddtrace/internal/runtime/runtime_metrics.py
--- a/ddtrace/internal/runtime/runtime_metrics.py
+++ b/ddtrace/internal/runtime/runtime_metrics.py
@@ -60,10 +60,10 @@
FLUSH_INTERVAL = 10
- def __init__(self, statsd_client, flush_interval=None):
+ def __init__(self, statsd_client, flush_interval=FLUSH_INTERVAL):
self._stay_alive = None
self._thread = None
- self._flush_interval = flush_interval or self.FLUSH_INTERVAL
+ self._flush_interval = flush_interval
self._statsd_client = statsd_client
self._runtime_metrics = RuntimeMetrics()
@@ -85,6 +85,10 @@
log.debug('Stopping {}'.format(self))
self._stay_alive = False
+ def join(self, timeout=None):
+ if self._thread:
+ return self._thread.join(timeout)
+
def _write_metric(self, key, value):
log.debug('Writing metric {}:{}'.format(key, value))
self._statsd_client.gauge(key, value)
|
{"golden_diff": "diff --git a/ddtrace/internal/runtime/runtime_metrics.py b/ddtrace/internal/runtime/runtime_metrics.py\n--- a/ddtrace/internal/runtime/runtime_metrics.py\n+++ b/ddtrace/internal/runtime/runtime_metrics.py\n@@ -60,10 +60,10 @@\n \n FLUSH_INTERVAL = 10\n \n- def __init__(self, statsd_client, flush_interval=None):\n+ def __init__(self, statsd_client, flush_interval=FLUSH_INTERVAL):\n self._stay_alive = None\n self._thread = None\n- self._flush_interval = flush_interval or self.FLUSH_INTERVAL\n+ self._flush_interval = flush_interval\n self._statsd_client = statsd_client\n self._runtime_metrics = RuntimeMetrics()\n \n@@ -85,6 +85,10 @@\n log.debug('Stopping {}'.format(self))\n self._stay_alive = False\n \n+ def join(self, timeout=None):\n+ if self._thread:\n+ return self._thread.join(timeout)\n+\n def _write_metric(self, key, value):\n log.debug('Writing metric {}:{}'.format(key, value))\n self._statsd_client.gauge(key, value)\n", "issue": "tests.internal.runtime.test_runtime_metrics.TestRuntimeWorker.test_worker_metrics fails randomly\n```\r\n def test_worker_metrics(self):\r\n self.tracer.configure(collect_metrics=True)\r\n \r\n with self.override_global_tracer(self.tracer):\r\n self.tracer._dogstatsd_client = DogStatsd()\r\n self.tracer._dogstatsd_client.socket = FakeSocket()\r\n \r\n root = self.start_span('parent', service='parent')\r\n context = root.context\r\n self.start_span('child', service='child', child_of=context)\r\n \r\n self.worker = RuntimeWorker(self.tracer._dogstatsd_client)\r\n self.worker.start()\r\n self.worker.stop()\r\n \r\n # get all received metrics\r\n received = []\r\n while True:\r\n new = self.tracer._dogstatsd_client.socket.recv()\r\n if not new:\r\n break\r\n \r\n received.append(new)\r\n # DEV: sleep since metrics will still be getting collected and written\r\n time.sleep(.5)\r\n \r\n # expect received all default metrics\r\n> self.assertEqual(len(received), len(DEFAULT_RUNTIME_METRICS))\r\nE AssertionError: 0 != 10\r\n\r\ntests/internal/runtime/test_runtime_metrics.py:75: AssertionError\r\n```\r\n\r\nhttps://circleci.com/gh/DataDog/dd-trace-py/114364\n", "before_files": [{"content": "import threading\nimport time\nimport itertools\n\nfrom ..logger import get_logger\nfrom .constants import (\n DEFAULT_RUNTIME_METRICS,\n DEFAULT_RUNTIME_TAGS,\n)\nfrom .metric_collectors import (\n GCRuntimeMetricCollector,\n PSUtilRuntimeMetricCollector,\n)\nfrom .tag_collectors import (\n TracerTagCollector,\n)\n\nlog = get_logger(__name__)\n\n\nclass RuntimeCollectorsIterable(object):\n def __init__(self, enabled=None):\n self._enabled = enabled or self.ENABLED\n # Initialize the collectors.\n self._collectors = [c() for c in self.COLLECTORS]\n\n def __iter__(self):\n collected = (\n collector.collect(self._enabled)\n for collector in self._collectors\n )\n return itertools.chain.from_iterable(collected)\n\n def __repr__(self):\n return '{}(enabled={})'.format(\n self.__class__.__name__,\n self._enabled,\n )\n\n\nclass RuntimeTags(RuntimeCollectorsIterable):\n ENABLED = DEFAULT_RUNTIME_TAGS\n COLLECTORS = [\n TracerTagCollector,\n ]\n\n\nclass RuntimeMetrics(RuntimeCollectorsIterable):\n ENABLED = DEFAULT_RUNTIME_METRICS\n COLLECTORS = [\n GCRuntimeMetricCollector,\n PSUtilRuntimeMetricCollector,\n ]\n\n\nclass RuntimeWorker(object):\n \"\"\" Worker thread for collecting and writing runtime metrics to a DogStatsd\n client.\n \"\"\"\n\n FLUSH_INTERVAL = 10\n\n def __init__(self, statsd_client, flush_interval=None):\n self._stay_alive = None\n self._thread = 
None\n self._flush_interval = flush_interval or self.FLUSH_INTERVAL\n self._statsd_client = statsd_client\n self._runtime_metrics = RuntimeMetrics()\n\n def _target(self):\n while self._stay_alive:\n self.flush()\n time.sleep(self._flush_interval)\n\n def start(self):\n if not self._thread:\n log.debug('Starting {}'.format(self))\n self._stay_alive = True\n self._thread = threading.Thread(target=self._target)\n self._thread.setDaemon(True)\n self._thread.start()\n\n def stop(self):\n if self._thread and self._stay_alive:\n log.debug('Stopping {}'.format(self))\n self._stay_alive = False\n\n def _write_metric(self, key, value):\n log.debug('Writing metric {}:{}'.format(key, value))\n self._statsd_client.gauge(key, value)\n\n def flush(self):\n if not self._statsd_client:\n log.warn('Attempted flush with uninitialized or failed statsd client')\n return\n\n for key, value in self._runtime_metrics:\n self._write_metric(key, value)\n\n def reset(self):\n self._runtime_metrics = RuntimeMetrics()\n\n def __repr__(self):\n return '{}(runtime_metrics={})'.format(\n self.__class__.__name__,\n self._runtime_metrics,\n )\n", "path": "ddtrace/internal/runtime/runtime_metrics.py"}], "after_files": [{"content": "import threading\nimport time\nimport itertools\n\nfrom ..logger import get_logger\nfrom .constants import (\n DEFAULT_RUNTIME_METRICS,\n DEFAULT_RUNTIME_TAGS,\n)\nfrom .metric_collectors import (\n GCRuntimeMetricCollector,\n PSUtilRuntimeMetricCollector,\n)\nfrom .tag_collectors import (\n TracerTagCollector,\n)\n\nlog = get_logger(__name__)\n\n\nclass RuntimeCollectorsIterable(object):\n def __init__(self, enabled=None):\n self._enabled = enabled or self.ENABLED\n # Initialize the collectors.\n self._collectors = [c() for c in self.COLLECTORS]\n\n def __iter__(self):\n collected = (\n collector.collect(self._enabled)\n for collector in self._collectors\n )\n return itertools.chain.from_iterable(collected)\n\n def __repr__(self):\n return '{}(enabled={})'.format(\n self.__class__.__name__,\n self._enabled,\n )\n\n\nclass RuntimeTags(RuntimeCollectorsIterable):\n ENABLED = DEFAULT_RUNTIME_TAGS\n COLLECTORS = [\n TracerTagCollector,\n ]\n\n\nclass RuntimeMetrics(RuntimeCollectorsIterable):\n ENABLED = DEFAULT_RUNTIME_METRICS\n COLLECTORS = [\n GCRuntimeMetricCollector,\n PSUtilRuntimeMetricCollector,\n ]\n\n\nclass RuntimeWorker(object):\n \"\"\" Worker thread for collecting and writing runtime metrics to a DogStatsd\n client.\n \"\"\"\n\n FLUSH_INTERVAL = 10\n\n def __init__(self, statsd_client, flush_interval=FLUSH_INTERVAL):\n self._stay_alive = None\n self._thread = None\n self._flush_interval = flush_interval\n self._statsd_client = statsd_client\n self._runtime_metrics = RuntimeMetrics()\n\n def _target(self):\n while self._stay_alive:\n self.flush()\n time.sleep(self._flush_interval)\n\n def start(self):\n if not self._thread:\n log.debug('Starting {}'.format(self))\n self._stay_alive = True\n self._thread = threading.Thread(target=self._target)\n self._thread.setDaemon(True)\n self._thread.start()\n\n def stop(self):\n if self._thread and self._stay_alive:\n log.debug('Stopping {}'.format(self))\n self._stay_alive = False\n\n def join(self, timeout=None):\n if self._thread:\n return self._thread.join(timeout)\n\n def _write_metric(self, key, value):\n log.debug('Writing metric {}:{}'.format(key, value))\n self._statsd_client.gauge(key, value)\n\n def flush(self):\n if not self._statsd_client:\n log.warn('Attempted flush with uninitialized or failed statsd client')\n return\n\n for key, 
value in self._runtime_metrics:\n self._write_metric(key, value)\n\n def reset(self):\n self._runtime_metrics = RuntimeMetrics()\n\n def __repr__(self):\n return '{}(runtime_metrics={})'.format(\n self.__class__.__name__,\n self._runtime_metrics,\n )\n", "path": "ddtrace/internal/runtime/runtime_metrics.py"}]}
| 1,384 | 253 |
gh_patches_debug_13078 | rasdani/github-patches | git_diff | searxng__searxng-1869 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duckduckgo weather formatting error
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
2022.10.01-901143f0
**How did you install SearXNG?**
Script
**What happened?**
Humidity formatting has floating point error, 14+ numbers after decimal. Stuff like `56.00000000000001%` and `56.99999999999999%`
**How To Reproduce**
`!ddw 12345` (or some other zip)
Scroll through results until you see error, it's fairly common
**Expected behavior**
Humidity will be formatted such as 56%, 57%, etc
**Screenshots & Logs**

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/duckduckgo_weather.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """DuckDuckGo Weather"""
4
5 from json import loads
6 from urllib.parse import quote
7
8 from datetime import datetime
9 from flask_babel import gettext
10
11 about = {
12 "website": 'https://duckduckgo.com/',
13 "wikidata_id": 'Q12805',
14 "official_api_documentation": None,
15 "use_official_api": True,
16 "require_api_key": False,
17 "results": "JSON",
18 }
19
20 categories = ["others"]
21
22 url = "https://duckduckgo.com/js/spice/forecast/{query}/{lang}"
23
24
25 def generate_condition_table(condition):
26 res = ""
27
28 res += f"<tr><td><b>{gettext('Condition')}</b></td>" f"<td><b>{condition['summary']}</b></td></tr>"
29
30 res += (
31 f"<tr><td><b>{gettext('Temperature')}</b></td>"
32 f"<td><b>{f_to_c(condition['temperature'])}°C / {condition['temperature']}°F</b></td></tr>"
33 )
34
35 res += (
36 f"<tr><td>{gettext('Feels like')}</td><td>{f_to_c(condition['apparentTemperature'])}°C / "
37 f"{condition['apparentTemperature']}°F</td></tr>"
38 )
39
40 res += (
41 f"<tr><td>{gettext('Wind')}</td><td>{condition['windBearing']}° — "
42 f"{'%.2f' % (condition['windSpeed'] * 1.6093440006147)} km/h / {condition['windSpeed']} mph</td></tr>"
43 )
44
45 res += f"<tr><td>{gettext('Visibility')}</td><td>{condition['visibility']} km</td>"
46
47 res += f"<tr><td>{gettext('Humidity')}</td><td>{condition['humidity'] * 100}%</td></tr>"
48
49 return res
50
51
52 def generate_day_table(day):
53 res = ""
54
55 res += (
56 f"<tr><td>{gettext('Min temp.')}</td><td>{f_to_c(day['temperatureLow'])}°C / "
57 f"{day['temperatureLow']}°F</td></tr>"
58 )
59 res += (
60 f"<tr><td>{gettext('Max temp.')}</td><td>{f_to_c(day['temperatureHigh'])}°C / "
61 f"{day['temperatureHigh']}°F</td></tr>"
62 )
63 res += f"<tr><td>{gettext('UV index')}</td><td>{day['uvIndex']}</td></tr>"
64 res += (
65 f"<tr><td>{gettext('Sunrise')}</td><td>{datetime.fromtimestamp(day['sunriseTime']).strftime('%H:%M')}</td></tr>"
66 )
67 res += (
68 f"<tr><td>{gettext('Sunset')}</td><td>{datetime.fromtimestamp(day['sunsetTime']).strftime('%H:%M')}</td></tr>"
69 )
70
71 return res
72
73
74 def request(query, params):
75 params["url"] = url.format(query=quote(query), lang=params['language'].split('-')[0])
76
77 return params
78
79
80 def f_to_c(temperature):
81 return "%.2f" % ((temperature - 32) / 1.8)
82
83
84 def response(resp):
85 results = []
86
87 if resp.text.strip() == "ddg_spice_forecast();":
88 return []
89
90 result = loads(resp.text[resp.text.find('\n') + 1 : resp.text.rfind('\n') - 2])
91
92 current = result["currently"]
93
94 title = result['flags']['ddg-location']
95
96 infobox = f"<h3>{gettext('Current condition')}</h3><table><tbody>"
97
98 infobox += generate_condition_table(current)
99
100 infobox += "</tbody></table>"
101
102 last_date = None
103
104 for time in result['hourly']['data']:
105 current_time = datetime.fromtimestamp(time['time'])
106
107 if last_date != current_time.date():
108 if last_date is not None:
109 infobox += "</tbody></table>"
110
111 infobox += f"<h3>{current_time.strftime('%Y-%m-%d')}</h3>"
112
113 infobox += "<table><tbody>"
114
115 for day in result['daily']['data']:
116 if datetime.fromtimestamp(day['time']).date() == current_time.date():
117 infobox += generate_day_table(day)
118
119 infobox += "</tbody></table><table><tbody>"
120
121 last_date = current_time.date()
122
123 infobox += f"<tr><td rowspan=\"7\"><b>{current_time.strftime('%H:%M')}</b></td></tr>"
124
125 infobox += generate_condition_table(time)
126
127 infobox += "</tbody></table>"
128
129 results.append(
130 {
131 "infobox": title,
132 "content": infobox,
133 }
134 )
135
136 return results
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/duckduckgo_weather.py b/searx/engines/duckduckgo_weather.py
--- a/searx/engines/duckduckgo_weather.py
+++ b/searx/engines/duckduckgo_weather.py
@@ -39,12 +39,12 @@
res += (
f"<tr><td>{gettext('Wind')}</td><td>{condition['windBearing']}° — "
- f"{'%.2f' % (condition['windSpeed'] * 1.6093440006147)} km/h / {condition['windSpeed']} mph</td></tr>"
+ f"{(condition['windSpeed'] * 1.6093440006147):.2f} km/h / {condition['windSpeed']} mph</td></tr>"
)
res += f"<tr><td>{gettext('Visibility')}</td><td>{condition['visibility']} km</td>"
- res += f"<tr><td>{gettext('Humidity')}</td><td>{condition['humidity'] * 100}%</td></tr>"
+ res += f"<tr><td>{gettext('Humidity')}</td><td>{(condition['humidity'] * 100):.1f}%</td></tr>"
return res
|
{"golden_diff": "diff --git a/searx/engines/duckduckgo_weather.py b/searx/engines/duckduckgo_weather.py\n--- a/searx/engines/duckduckgo_weather.py\n+++ b/searx/engines/duckduckgo_weather.py\n@@ -39,12 +39,12 @@\n \n res += (\n f\"<tr><td>{gettext('Wind')}</td><td>{condition['windBearing']}\u00b0 \u2014 \"\n- f\"{'%.2f' % (condition['windSpeed'] * 1.6093440006147)} km/h / {condition['windSpeed']} mph</td></tr>\"\n+ f\"{(condition['windSpeed'] * 1.6093440006147):.2f} km/h / {condition['windSpeed']} mph</td></tr>\"\n )\n \n res += f\"<tr><td>{gettext('Visibility')}</td><td>{condition['visibility']} km</td>\"\n \n- res += f\"<tr><td>{gettext('Humidity')}</td><td>{condition['humidity'] * 100}%</td></tr>\"\n+ res += f\"<tr><td>{gettext('Humidity')}</td><td>{(condition['humidity'] * 100):.1f}%</td></tr>\"\n \n return res\n", "issue": "Duckduckgo weather formatting error\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n2022.10.01-901143f0\r\n\r\n**How did you install SearXNG?**\r\nScript\r\n**What happened?**\r\nHumidity formatting has floating point error, 14+ numbers after decimal. Stuff like `56.00000000000001%` and `56.99999999999999%`\r\n**How To Reproduce**\r\n`!ddw 12345` (or some other zip)\r\n\r\nScroll through results until you see error, it's fairly common\r\n\r\n**Expected behavior**\r\nHumidity will be formatted such as 56%, 57%, etc\r\n\r\n**Screenshots & Logs**\r\n\r\n\nDuckduckgo weather formatting error\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n2022.10.01-901143f0\r\n\r\n**How did you install SearXNG?**\r\nScript\r\n**What happened?**\r\nHumidity formatting has floating point error, 14+ numbers after decimal. 
Stuff like `56.00000000000001%` and `56.99999999999999%`\r\n**How To Reproduce**\r\n`!ddw 12345` (or some other zip)\r\n\r\nScroll through results until you see error, it's fairly common\r\n\r\n**Expected behavior**\r\nHumidity will be formatted such as 56%, 57%, etc\r\n\r\n**Screenshots & Logs**\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"DuckDuckGo Weather\"\"\"\n\nfrom json import loads\nfrom urllib.parse import quote\n\nfrom datetime import datetime\nfrom flask_babel import gettext\n\nabout = {\n \"website\": 'https://duckduckgo.com/',\n \"wikidata_id\": 'Q12805',\n \"official_api_documentation\": None,\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\ncategories = [\"others\"]\n\nurl = \"https://duckduckgo.com/js/spice/forecast/{query}/{lang}\"\n\n\ndef generate_condition_table(condition):\n res = \"\"\n\n res += f\"<tr><td><b>{gettext('Condition')}</b></td>\" f\"<td><b>{condition['summary']}</b></td></tr>\"\n\n res += (\n f\"<tr><td><b>{gettext('Temperature')}</b></td>\"\n f\"<td><b>{f_to_c(condition['temperature'])}\u00b0C / {condition['temperature']}\u00b0F</b></td></tr>\"\n )\n\n res += (\n f\"<tr><td>{gettext('Feels like')}</td><td>{f_to_c(condition['apparentTemperature'])}\u00b0C / \"\n f\"{condition['apparentTemperature']}\u00b0F</td></tr>\"\n )\n\n res += (\n f\"<tr><td>{gettext('Wind')}</td><td>{condition['windBearing']}\u00b0 \u2014 \"\n f\"{'%.2f' % (condition['windSpeed'] * 1.6093440006147)} km/h / {condition['windSpeed']} mph</td></tr>\"\n )\n\n res += f\"<tr><td>{gettext('Visibility')}</td><td>{condition['visibility']} km</td>\"\n\n res += f\"<tr><td>{gettext('Humidity')}</td><td>{condition['humidity'] * 100}%</td></tr>\"\n\n return res\n\n\ndef generate_day_table(day):\n res = \"\"\n\n res += (\n f\"<tr><td>{gettext('Min temp.')}</td><td>{f_to_c(day['temperatureLow'])}\u00b0C / \"\n f\"{day['temperatureLow']}\u00b0F</td></tr>\"\n )\n res += (\n f\"<tr><td>{gettext('Max temp.')}</td><td>{f_to_c(day['temperatureHigh'])}\u00b0C / \"\n f\"{day['temperatureHigh']}\u00b0F</td></tr>\"\n )\n res += f\"<tr><td>{gettext('UV index')}</td><td>{day['uvIndex']}</td></tr>\"\n res += (\n f\"<tr><td>{gettext('Sunrise')}</td><td>{datetime.fromtimestamp(day['sunriseTime']).strftime('%H:%M')}</td></tr>\"\n )\n res += (\n f\"<tr><td>{gettext('Sunset')}</td><td>{datetime.fromtimestamp(day['sunsetTime']).strftime('%H:%M')}</td></tr>\"\n )\n\n return res\n\n\ndef request(query, params):\n params[\"url\"] = url.format(query=quote(query), lang=params['language'].split('-')[0])\n\n return params\n\n\ndef f_to_c(temperature):\n return \"%.2f\" % ((temperature - 32) / 1.8)\n\n\ndef response(resp):\n results = []\n\n if resp.text.strip() == \"ddg_spice_forecast();\":\n return []\n\n result = loads(resp.text[resp.text.find('\\n') + 1 : resp.text.rfind('\\n') - 2])\n\n current = result[\"currently\"]\n\n title = result['flags']['ddg-location']\n\n infobox = f\"<h3>{gettext('Current condition')}</h3><table><tbody>\"\n\n infobox += generate_condition_table(current)\n\n infobox += \"</tbody></table>\"\n\n last_date = None\n\n for time in result['hourly']['data']:\n current_time = datetime.fromtimestamp(time['time'])\n\n if last_date != current_time.date():\n if last_date is not None:\n infobox += \"</tbody></table>\"\n\n infobox += f\"<h3>{current_time.strftime('%Y-%m-%d')}</h3>\"\n\n infobox += \"<table><tbody>\"\n\n for day in result['daily']['data']:\n if 
datetime.fromtimestamp(day['time']).date() == current_time.date():\n infobox += generate_day_table(day)\n\n infobox += \"</tbody></table><table><tbody>\"\n\n last_date = current_time.date()\n\n infobox += f\"<tr><td rowspan=\\\"7\\\"><b>{current_time.strftime('%H:%M')}</b></td></tr>\"\n\n infobox += generate_condition_table(time)\n\n infobox += \"</tbody></table>\"\n\n results.append(\n {\n \"infobox\": title,\n \"content\": infobox,\n }\n )\n\n return results\n", "path": "searx/engines/duckduckgo_weather.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"DuckDuckGo Weather\"\"\"\n\nfrom json import loads\nfrom urllib.parse import quote\n\nfrom datetime import datetime\nfrom flask_babel import gettext\n\nabout = {\n \"website\": 'https://duckduckgo.com/',\n \"wikidata_id\": 'Q12805',\n \"official_api_documentation\": None,\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\ncategories = [\"others\"]\n\nurl = \"https://duckduckgo.com/js/spice/forecast/{query}/{lang}\"\n\n\ndef generate_condition_table(condition):\n res = \"\"\n\n res += f\"<tr><td><b>{gettext('Condition')}</b></td>\" f\"<td><b>{condition['summary']}</b></td></tr>\"\n\n res += (\n f\"<tr><td><b>{gettext('Temperature')}</b></td>\"\n f\"<td><b>{f_to_c(condition['temperature'])}\u00b0C / {condition['temperature']}\u00b0F</b></td></tr>\"\n )\n\n res += (\n f\"<tr><td>{gettext('Feels like')}</td><td>{f_to_c(condition['apparentTemperature'])}\u00b0C / \"\n f\"{condition['apparentTemperature']}\u00b0F</td></tr>\"\n )\n\n res += (\n f\"<tr><td>{gettext('Wind')}</td><td>{condition['windBearing']}\u00b0 \u2014 \"\n f\"{(condition['windSpeed'] * 1.6093440006147):.2f} km/h / {condition['windSpeed']} mph</td></tr>\"\n )\n\n res += f\"<tr><td>{gettext('Visibility')}</td><td>{condition['visibility']} km</td>\"\n\n res += f\"<tr><td>{gettext('Humidity')}</td><td>{(condition['humidity'] * 100):.1f}%</td></tr>\"\n\n return res\n\n\ndef generate_day_table(day):\n res = \"\"\n\n res += (\n f\"<tr><td>{gettext('Min temp.')}</td><td>{f_to_c(day['temperatureLow'])}\u00b0C / \"\n f\"{day['temperatureLow']}\u00b0F</td></tr>\"\n )\n res += (\n f\"<tr><td>{gettext('Max temp.')}</td><td>{f_to_c(day['temperatureHigh'])}\u00b0C / \"\n f\"{day['temperatureHigh']}\u00b0F</td></tr>\"\n )\n res += f\"<tr><td>{gettext('UV index')}</td><td>{day['uvIndex']}</td></tr>\"\n res += (\n f\"<tr><td>{gettext('Sunrise')}</td><td>{datetime.fromtimestamp(day['sunriseTime']).strftime('%H:%M')}</td></tr>\"\n )\n res += (\n f\"<tr><td>{gettext('Sunset')}</td><td>{datetime.fromtimestamp(day['sunsetTime']).strftime('%H:%M')}</td></tr>\"\n )\n\n return res\n\n\ndef request(query, params):\n params[\"url\"] = url.format(query=quote(query), lang=params['language'].split('-')[0])\n\n return params\n\n\ndef f_to_c(temperature):\n return \"%.2f\" % ((temperature - 32) / 1.8)\n\n\ndef response(resp):\n results = []\n\n if resp.text.strip() == \"ddg_spice_forecast();\":\n return []\n\n result = loads(resp.text[resp.text.find('\\n') + 1 : resp.text.rfind('\\n') - 2])\n\n current = result[\"currently\"]\n\n title = result['flags']['ddg-location']\n\n infobox = f\"<h3>{gettext('Current condition')}</h3><table><tbody>\"\n\n infobox += generate_condition_table(current)\n\n infobox += \"</tbody></table>\"\n\n last_date = None\n\n for time in result['hourly']['data']:\n current_time = datetime.fromtimestamp(time['time'])\n\n if last_date != current_time.date():\n if last_date is not None:\n infobox 
+= \"</tbody></table>\"\n\n infobox += f\"<h3>{current_time.strftime('%Y-%m-%d')}</h3>\"\n\n infobox += \"<table><tbody>\"\n\n for day in result['daily']['data']:\n if datetime.fromtimestamp(day['time']).date() == current_time.date():\n infobox += generate_day_table(day)\n\n infobox += \"</tbody></table><table><tbody>\"\n\n last_date = current_time.date()\n\n infobox += f\"<tr><td rowspan=\\\"7\\\"><b>{current_time.strftime('%H:%M')}</b></td></tr>\"\n\n infobox += generate_condition_table(time)\n\n infobox += \"</tbody></table>\"\n\n results.append(\n {\n \"infobox\": title,\n \"content\": infobox,\n }\n )\n\n return results\n", "path": "searx/engines/duckduckgo_weather.py"}]}
| 2,178 | 306 |
gh_patches_debug_14602
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3173
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in disaggregation view
The PGView for disaggregation is incorrect. It includes data from all updates rather than just approved updates.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/result/indicator_period_aggregation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo Reporting is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db import models
8
9 from django_pgviews import view as pg
10
11
12 ACTUAL_VALUE_SQL = """
13 SELECT
14 -- row_number() OVER... creates an artificial "pk" column, without which Django will protest
15 row_number() OVER (ORDER BY period.id) AS id,
16 period.id AS period_id,
17 indicator.measure as measure,
18 sum((update.value) :: DECIMAL(20,2)) AS value,
19 sum((update.numerator) :: DECIMAL(20,2)) AS numerator,
20 sum((update.denominator) :: DECIMAL(20,2)) AS denominator
21 FROM
22 rsr_indicatorperiod period,
23 rsr_indicator indicator,
24 rsr_indicatorperioddata update
25 WHERE
26 (
27 (((indicator.id = period.indicator_id) AND
28 (period.id = update.period_id)) AND
29 ((update.status) :: TEXT = 'A' :: TEXT)) AND
30 ((update.value) :: TEXT ~ '^\d+\.?\d{0,2}$' :: TEXT OR update.value IS NULL)
31 )
32 GROUP BY period.id, indicator.measure;
33 """
34
35
36 class PeriodActualValue(pg.View):
37 # on_delete=models.DO_NOTHING is needed to prevent problems with PG trying to delete views' data
38 period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)
39 measure = models.CharField(max_length=1)
40 value = models.IntegerField()
41 numerator = models.IntegerField()
42 denominator = models.IntegerField()
43
44 sql = ACTUAL_VALUE_SQL
45
46 class Meta:
47 app_label = 'rsr'
48 db_table = 'rsr_indicator_period_actual_value'
49 managed = False
50
51
52 DISAGG_SQL = """
53 WITH aggregated_disaggs AS (
54 SELECT
55 dimension_id,
56 sum(("value") :: DECIMAL(20,2)) AS value,
57 sum((numerator) :: DECIMAL(20,2)) AS numerator,
58 sum((denominator) :: DECIMAL(20,2)) AS denominator
59 FROM
60 rsr_disaggregation
61 GROUP BY
62 dimension_id
63 ),
64 period_disaggs AS (
65 SELECT DISTINCT
66 indicator.id AS indicator_id,
67 period.id AS period_id,
68 dimension.name AS dimension_name,
69 dimension.value AS dimension_value,
70 agg.value,
71 agg.numerator,
72 agg.denominator
73 FROM
74 rsr_indicator indicator,
75 rsr_indicatorperiod period,
76 rsr_indicatorperioddata update,
77 aggregated_disaggs agg,
78 rsr_indicatordimension dimension
79 WHERE
80 indicator.id = period.indicator_id AND
81 period.id = update.period_id AND
82 indicator.id = dimension.indicator_id AND
83 dimension.id = agg.dimension_id
84 )
85 SELECT
86 row_number() OVER (ORDER BY indicator_id) AS id,
87 *
88 FROM period_disaggs
89 """
90
91
92 class PeriodDisaggregation(pg.View):
93 indicator = models.ForeignKey('Indicator', on_delete=models.DO_NOTHING)
94 period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)
95 dimension_name = models.CharField(max_length=100)
96 dimension_value = models.CharField(max_length=100)
97 value = models.IntegerField()
98 numerator = models.IntegerField()
99 denominator = models.IntegerField()
100
101 sql = DISAGG_SQL
102
103 class Meta:
104 app_label = 'rsr'
105 db_table = 'rsr_indicator_period_disaggregation'
106 managed = False
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rsr/models/result/indicator_period_aggregation.py b/akvo/rsr/models/result/indicator_period_aggregation.py
--- a/akvo/rsr/models/result/indicator_period_aggregation.py
+++ b/akvo/rsr/models/result/indicator_period_aggregation.py
@@ -52,12 +52,16 @@
DISAGG_SQL = """
WITH aggregated_disaggs AS (
SELECT
- dimension_id,
- sum(("value") :: DECIMAL(20,2)) AS value,
- sum((numerator) :: DECIMAL(20,2)) AS numerator,
- sum((denominator) :: DECIMAL(20,2)) AS denominator
+ disagg.dimension_id AS dimension_id,
+ sum((disagg.value) :: DECIMAL(20,2)) AS value,
+ sum((disagg.numerator) :: DECIMAL(20,2)) AS numerator,
+ sum((disagg.denominator) :: DECIMAL(20,2)) AS denominator
FROM
- rsr_disaggregation
+ rsr_disaggregation disagg,
+ rsr_indicatorperioddata "update"
+ WHERE
+ update.status = 'A' AND
+ disagg.update_id = update.id
GROUP BY
dimension_id
),
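
For illustration only: a minimal sketch of how the corrected view could be sanity-checked from Python once it is recreated. The import path and field names are taken from the file above; the assumption that the `django_pgviews` view exposes Django's default manager (`objects`), and the helper name `approved_disagg_total`, are mine and not part of the patch.

```python
# Hypothetical verification sketch, not part of the patch.
# Assumes the PeriodDisaggregation pg.View exposes Django's default manager.
from akvo.rsr.models.result.indicator_period_aggregation import PeriodDisaggregation


def approved_disagg_total(period_id, dimension_name):
    """Total disaggregated value for one period/dimension; with the patch this
    reflects approved ('A') updates only, instead of all updates."""
    rows = PeriodDisaggregation.objects.filter(
        period_id=period_id, dimension_name=dimension_name
    )
    return sum(row.value or 0 for row in rows)
```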
|
{"golden_diff": "diff --git a/akvo/rsr/models/result/indicator_period_aggregation.py b/akvo/rsr/models/result/indicator_period_aggregation.py\n--- a/akvo/rsr/models/result/indicator_period_aggregation.py\n+++ b/akvo/rsr/models/result/indicator_period_aggregation.py\n@@ -52,12 +52,16 @@\n DISAGG_SQL = \"\"\"\n WITH aggregated_disaggs AS (\n SELECT\n- dimension_id,\n- sum((\"value\") :: DECIMAL(20,2)) AS value,\n- sum((numerator) :: DECIMAL(20,2)) AS numerator,\n- sum((denominator) :: DECIMAL(20,2)) AS denominator\n+ disagg.dimension_id AS dimension_id,\n+ sum((disagg.value) :: DECIMAL(20,2)) AS value,\n+ sum((disagg.numerator) :: DECIMAL(20,2)) AS numerator,\n+ sum((disagg.denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n- rsr_disaggregation\n+ rsr_disaggregation disagg,\n+ rsr_indicatorperioddata \"update\"\n+ WHERE\n+ update.status = 'A' AND\n+ disagg.update_id = update.id\n GROUP BY\n dimension_id\n ),\n", "issue": "Error in disaggregation view\nThe PGView for disaggregation is incorrect. It includes data from all updates rather than just approved updates.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db import models\n\nfrom django_pgviews import view as pg\n\n\nACTUAL_VALUE_SQL = \"\"\"\n SELECT\n -- row_number() OVER... creates an artificial \"pk\" column, without which Django will protest\n row_number() OVER (ORDER BY period.id) AS id,\n period.id AS period_id,\n indicator.measure as measure,\n sum((update.value) :: DECIMAL(20,2)) AS value,\n sum((update.numerator) :: DECIMAL(20,2)) AS numerator,\n sum((update.denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n rsr_indicatorperiod period,\n rsr_indicator indicator,\n rsr_indicatorperioddata update\n WHERE\n (\n (((indicator.id = period.indicator_id) AND\n (period.id = update.period_id)) AND\n ((update.status) :: TEXT = 'A' :: TEXT)) AND\n ((update.value) :: TEXT ~ '^\\d+\\.?\\d{0,2}$' :: TEXT OR update.value IS NULL)\n )\n GROUP BY period.id, indicator.measure;\n\"\"\"\n\n\nclass PeriodActualValue(pg.View):\n # on_delete=models.DO_NOTHING is needed to prevent problems with PG trying to delete views' data\n period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)\n measure = models.CharField(max_length=1)\n value = models.IntegerField()\n numerator = models.IntegerField()\n denominator = models.IntegerField()\n\n sql = ACTUAL_VALUE_SQL\n\n class Meta:\n app_label = 'rsr'\n db_table = 'rsr_indicator_period_actual_value'\n managed = False\n\n\nDISAGG_SQL = \"\"\"\n WITH aggregated_disaggs AS (\n SELECT\n dimension_id,\n sum((\"value\") :: DECIMAL(20,2)) AS value,\n sum((numerator) :: DECIMAL(20,2)) AS numerator,\n sum((denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n rsr_disaggregation\n GROUP BY\n dimension_id\n ),\n period_disaggs AS (\n SELECT DISTINCT\n indicator.id AS indicator_id,\n period.id AS period_id,\n dimension.name AS dimension_name,\n dimension.value AS dimension_value,\n agg.value,\n agg.numerator,\n agg.denominator\n FROM\n rsr_indicator indicator,\n rsr_indicatorperiod period,\n rsr_indicatorperioddata update,\n aggregated_disaggs agg,\n rsr_indicatordimension dimension\n WHERE\n indicator.id = period.indicator_id AND\n period.id = update.period_id AND\n indicator.id = dimension.indicator_id AND\n 
dimension.id = agg.dimension_id\n )\n SELECT\n row_number() OVER (ORDER BY indicator_id) AS id,\n *\n FROM period_disaggs\n\"\"\"\n\n\nclass PeriodDisaggregation(pg.View):\n indicator = models.ForeignKey('Indicator', on_delete=models.DO_NOTHING)\n period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)\n dimension_name = models.CharField(max_length=100)\n dimension_value = models.CharField(max_length=100)\n value = models.IntegerField()\n numerator = models.IntegerField()\n denominator = models.IntegerField()\n\n sql = DISAGG_SQL\n\n class Meta:\n app_label = 'rsr'\n db_table = 'rsr_indicator_period_disaggregation'\n managed = False\n", "path": "akvo/rsr/models/result/indicator_period_aggregation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db import models\n\nfrom django_pgviews import view as pg\n\n\nACTUAL_VALUE_SQL = \"\"\"\n SELECT\n -- row_number() OVER... creates an artificial \"pk\" column, without which Django will protest\n row_number() OVER (ORDER BY period.id) AS id,\n period.id AS period_id,\n indicator.measure as measure,\n sum((update.value) :: DECIMAL(20,2)) AS value,\n sum((update.numerator) :: DECIMAL(20,2)) AS numerator,\n sum((update.denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n rsr_indicatorperiod period,\n rsr_indicator indicator,\n rsr_indicatorperioddata update\n WHERE\n (\n (((indicator.id = period.indicator_id) AND\n (period.id = update.period_id)) AND\n ((update.status) :: TEXT = 'A' :: TEXT)) AND\n ((update.value) :: TEXT ~ '^\\d+\\.?\\d{0,2}$' :: TEXT OR update.value IS NULL)\n )\n GROUP BY period.id, indicator.measure;\n\"\"\"\n\n\nclass PeriodActualValue(pg.View):\n # on_delete=models.DO_NOTHING is needed to prevent problems with PG trying to delete views' data\n period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)\n measure = models.CharField(max_length=1)\n value = models.IntegerField()\n numerator = models.IntegerField()\n denominator = models.IntegerField()\n\n sql = ACTUAL_VALUE_SQL\n\n class Meta:\n app_label = 'rsr'\n db_table = 'rsr_indicator_period_actual_value'\n managed = False\n\n\nDISAGG_SQL = \"\"\"\n WITH aggregated_disaggs AS (\n SELECT\n disagg.dimension_id AS dimension_id,\n sum((disagg.value) :: DECIMAL(20,2)) AS value,\n sum((disagg.numerator) :: DECIMAL(20,2)) AS numerator,\n sum((disagg.denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n rsr_disaggregation disagg,\n rsr_indicatorperioddata \"update\"\n WHERE\n update.status = 'A' AND\n disagg.update_id = update.id\n GROUP BY\n dimension_id\n ),\n period_disaggs AS (\n SELECT DISTINCT\n indicator.id AS indicator_id,\n period.id AS period_id,\n dimension.name AS dimension_name,\n dimension.value AS dimension_value,\n agg.value,\n agg.numerator,\n agg.denominator\n FROM\n rsr_indicator indicator,\n rsr_indicatorperiod period,\n rsr_indicatorperioddata update,\n aggregated_disaggs agg,\n rsr_indicatordimension dimension\n WHERE\n indicator.id = period.indicator_id AND\n period.id = update.period_id AND\n indicator.id = dimension.indicator_id AND\n dimension.id = agg.dimension_id\n )\n SELECT\n row_number() OVER (ORDER BY indicator_id) AS id,\n *\n FROM period_disaggs\n\"\"\"\n\n\nclass PeriodDisaggregation(pg.View):\n indicator = 
models.ForeignKey('Indicator', on_delete=models.DO_NOTHING)\n period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)\n dimension_name = models.CharField(max_length=100)\n dimension_value = models.CharField(max_length=100)\n value = models.IntegerField()\n numerator = models.IntegerField()\n denominator = models.IntegerField()\n\n sql = DISAGG_SQL\n\n class Meta:\n app_label = 'rsr'\n db_table = 'rsr_indicator_period_disaggregation'\n managed = False\n", "path": "akvo/rsr/models/result/indicator_period_aggregation.py"}]}
| 1,312 | 293 |
gh_patches_debug_15246
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-1194
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update baggage header name
As per the spec, baggage propagation must use the header as specified in the w3c baggage specification https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/baggage/api.md#baggage-propagation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import typing
16 import urllib.parse
17
18 from opentelemetry import baggage
19 from opentelemetry.context import get_current
20 from opentelemetry.context.context import Context
21 from opentelemetry.trace.propagation import textmap
22
23
24 class BaggagePropagator(textmap.TextMapPropagator):
25 MAX_HEADER_LENGTH = 8192
26 MAX_PAIR_LENGTH = 4096
27 MAX_PAIRS = 180
28 _BAGGAGE_HEADER_NAME = "otcorrelations"
29
30 def extract(
31 self,
32 get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],
33 carrier: textmap.TextMapPropagatorT,
34 context: typing.Optional[Context] = None,
35 ) -> Context:
36 """Extract Baggage from the carrier.
37
38 See
39 `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`
40 """
41
42 if context is None:
43 context = get_current()
44
45 header = _extract_first_element(
46 get_from_carrier(carrier, self._BAGGAGE_HEADER_NAME)
47 )
48
49 if not header or len(header) > self.MAX_HEADER_LENGTH:
50 return context
51
52 baggage_entries = header.split(",")
53 total_baggage_entries = self.MAX_PAIRS
54 for entry in baggage_entries:
55 if total_baggage_entries <= 0:
56 return context
57 total_baggage_entries -= 1
58 if len(entry) > self.MAX_PAIR_LENGTH:
59 continue
60 try:
61 name, value = entry.split("=", 1)
62 except Exception: # pylint: disable=broad-except
63 continue
64 context = baggage.set_baggage(
65 urllib.parse.unquote(name).strip(),
66 urllib.parse.unquote(value).strip(),
67 context=context,
68 )
69
70 return context
71
72 def inject(
73 self,
74 set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
75 carrier: textmap.TextMapPropagatorT,
76 context: typing.Optional[Context] = None,
77 ) -> None:
78 """Injects Baggage into the carrier.
79
80 See
81 `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`
82 """
83 baggage_entries = baggage.get_all(context=context)
84 if not baggage_entries:
85 return
86
87 baggage_string = _format_baggage(baggage_entries)
88 set_in_carrier(
89 carrier, self._BAGGAGE_HEADER_NAME, baggage_string,
90 )
91
92
93 def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
94 return ",".join(
95 key + "=" + urllib.parse.quote_plus(str(value))
96 for key, value in baggage_entries.items()
97 )
98
99
100 def _extract_first_element(
101 items: typing.Iterable[textmap.TextMapPropagatorT],
102 ) -> typing.Optional[textmap.TextMapPropagatorT]:
103 if items is None:
104 return None
105 return next(iter(items), None)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
@@ -25,7 +25,7 @@
MAX_HEADER_LENGTH = 8192
MAX_PAIR_LENGTH = 4096
MAX_PAIRS = 180
- _BAGGAGE_HEADER_NAME = "otcorrelations"
+ _BAGGAGE_HEADER_NAME = "baggage"
def extract(
self,
@@ -85,9 +85,7 @@
return
baggage_string = _format_baggage(baggage_entries)
- set_in_carrier(
- carrier, self._BAGGAGE_HEADER_NAME, baggage_string,
- )
+ set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)
def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
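
For illustration only: a small round-trip sketch of the propagator after this change, using the `extract`/`inject` signatures shown in the file above. The carrier dictionaries, the getter/setter helpers and the sample baggage string are invented for the example.

```python
# Hypothetical usage sketch, not part of the patch.
from opentelemetry.baggage.propagation import BaggagePropagator


def dict_getter(carrier, name):
    return [carrier[name]] if name in carrier else []


def dict_setter(carrier, name, value):
    carrier[name] = value


propagator = BaggagePropagator()

# Incoming headers now use the W3C name "baggage" rather than "otcorrelations".
incoming = {"baggage": "userId=alice,serverRegion=us-east-1"}
ctx = propagator.extract(dict_getter, incoming)

outgoing = {}
propagator.inject(dict_setter, outgoing, context=ctx)
# outgoing now carries the same entries under the "baggage" key.
```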
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n@@ -25,7 +25,7 @@\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n- _BAGGAGE_HEADER_NAME = \"otcorrelations\"\n+ _BAGGAGE_HEADER_NAME = \"baggage\"\n \n def extract(\n self,\n@@ -85,9 +85,7 @@\n return\n \n baggage_string = _format_baggage(baggage_entries)\n- set_in_carrier(\n- carrier, self._BAGGAGE_HEADER_NAME, baggage_string,\n- )\n+ set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n \n \n def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n", "issue": "Update baggage header name\nAs per the spec, baggage propagation must use the header as specified in the w3c baggage specification https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/baggage/api.md#baggage-propagation\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nimport urllib.parse\n\nfrom opentelemetry import baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.trace.propagation import textmap\n\n\nclass BaggagePropagator(textmap.TextMapPropagator):\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"otcorrelations\"\n\n def extract(\n self,\n get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n get_from_carrier(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self.MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self.MAX_PAIRS\n for entry in baggage_entries:\n if total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self.MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = baggage.set_baggage(\n urllib.parse.unquote(name).strip(),\n urllib.parse.unquote(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = baggage.get_all(context=context)\n if not 
baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n set_in_carrier(\n carrier, self._BAGGAGE_HEADER_NAME, baggage_string,\n )\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Iterable[textmap.TextMapPropagatorT],\n) -> typing.Optional[textmap.TextMapPropagatorT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nimport urllib.parse\n\nfrom opentelemetry import baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.trace.propagation import textmap\n\n\nclass BaggagePropagator(textmap.TextMapPropagator):\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n get_from_carrier(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self.MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self.MAX_PAIRS\n for entry in baggage_entries:\n if total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self.MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = baggage.set_baggage(\n urllib.parse.unquote(name).strip(),\n urllib.parse.unquote(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = baggage.get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Iterable[textmap.TextMapPropagatorT],\n) -> 
typing.Optional[textmap.TextMapPropagatorT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}]}
| 1,295 | 260 |
gh_patches_debug_456
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-2537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.6.2 doesn't work with dbt 0.17.0
### Describe the bug
Running dbt on python <= 3.6.2 results in an error that `name 'TimestampSnapshotConfig' is not defined`. 3.6.3 is unaffected.
### Steps To Reproduce
Install python 3.6.2
Install dbt
Try to use dbt
### Expected behavior
dbt should run, not crash, etc
### System information
**Which database are you using dbt with?**
Any
**The output of `dbt --version`:**
```
0.17.0
```
**The operating system you're using:**
macos, linux
**The output of `python --version`:**
`Python 3.6.2`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 from setuptools import setup
6 try:
7 from setuptools import find_namespace_packages
8 except ImportError:
9 # the user has a downlevel version of setuptools.
10 print('Error: dbt requires setuptools v40.1.0 or higher.')
11 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
12 'and try again')
13 sys.exit(1)
14
15
16 def read(fname):
17 return open(os.path.join(os.path.dirname(__file__), fname)).read()
18
19
20 package_name = "dbt-core"
21 package_version = "0.17.1a1"
22 description = """dbt (data build tool) is a command line tool that helps \
23 analysts and engineers transform data in their warehouse more effectively"""
24
25
26 setup(
27 name=package_name,
28 version=package_version,
29 description=description,
30 long_description=description,
31 author="Fishtown Analytics",
32 author_email="[email protected]",
33 url="https://github.com/fishtown-analytics/dbt",
34 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
35 package_data={
36 'dbt': [
37 'include/index.html',
38 'include/global_project/dbt_project.yml',
39 'include/global_project/docs/*.md',
40 'include/global_project/macros/*.sql',
41 'include/global_project/macros/**/*.sql',
42 'include/global_project/macros/**/**/*.sql',
43 'py.typed',
44 ]
45 },
46 test_suite='test',
47 entry_points={
48 'console_scripts': [
49 'dbt = dbt.main:main',
50 ],
51 },
52 scripts=[
53 'scripts/dbt',
54 ],
55 install_requires=[
56 'Jinja2==2.11.2',
57 'PyYAML>=3.11',
58 'sqlparse>=0.2.3,<0.4',
59 'networkx>=2.3,<3',
60 'minimal-snowplow-tracker==0.0.2',
61 'colorama>=0.3.9,<0.5',
62 'agate>=1.6,<2',
63 'isodate>=0.6,<0.7',
64 'json-rpc>=1.12,<2',
65 'werkzeug>=0.15,<0.17',
66 'dataclasses==0.6;python_version<"3.7"',
67 'hologram==0.0.7',
68 'logbook>=1.5,<1.6',
69 'typing-extensions>=3.7.4,<3.8',
70 # the following are all to match snowflake-connector-python
71 'requests>=2.18.0,<2.23.0',
72 'idna<2.9',
73 'cffi>=1.9,<1.14',
74 ],
75 zip_safe=False,
76 classifiers=[
77 'Development Status :: 5 - Production/Stable',
78
79 'License :: OSI Approved :: Apache Software License',
80
81 'Operating System :: Microsoft :: Windows',
82 'Operating System :: MacOS :: MacOS X',
83 'Operating System :: POSIX :: Linux',
84
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Programming Language :: Python :: 3.8',
88 ],
89 python_requires=">=3.6.2",
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -86,5 +86,5 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
- python_requires=">=3.6.2",
+ python_requires=">=3.6.3",
)
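
For illustration only: the patch raises the floor through packaging metadata (`python_requires`), so pip refuses to install on 3.6.2. A roughly equivalent runtime guard, shown purely to make the intent concrete, would look like the sketch below; dbt itself does not necessarily ship such a check.

```python
# Illustrative sketch only; the actual fix is the metadata change above.
import sys

if sys.version_info < (3, 6, 3):
    raise RuntimeError(
        "dbt requires Python 3.6.3 or newer; found "
        + ".".join(str(part) for part in sys.version_info[:3])
    )
```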
|
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -86,5 +86,5 @@\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n- python_requires=\">=3.6.2\",\n+ python_requires=\">=3.6.3\",\n )\n", "issue": "Python 3.6.2 doesn't work with dbt 0.17.0\n### Describe the bug\r\nRunning dbt on python <= 3.6.2 results in an error that `name 'TimestampSnapshotConfig' is not defined`. 3.6.3 is unaffected.\r\n\r\n### Steps To Reproduce\r\nInstall python 3.6.2\r\nInstall dbt\r\nTry to use dbt\r\n\r\n### Expected behavior\r\ndbt should run, not crash, etc\r\n\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\nAny\r\n\r\n**The output of `dbt --version`:**\r\n```\r\n0.17.0\r\n```\r\n\r\n**The operating system you're using:**\r\nmacos, linux\r\n\r\n**The output of `python --version`:**\r\n`Python 3.6.2`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.17.1a1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.5',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<0.17',\n 'dataclasses==0.6;python_version<\"3.7\"',\n 'hologram==0.0.7',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.23.0',\n 'idna<2.9',\n 'cffi>=1.9,<1.14',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the 
user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.17.1a1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.5',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<0.17',\n 'dataclasses==0.6;python_version<\"3.7\"',\n 'hologram==0.0.7',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.23.0',\n 'idna<2.9',\n 'cffi>=1.9,<1.14',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]}
| 1,329 | 87 |
gh_patches_debug_31382
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2814
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider pricerite is broken
During the global build at 2021-05-26-14-42-23, spider **pricerite** failed with **0 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/pricerite.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/pricerite.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class PriceRiteSpider(scrapy.Spider):
12 name = "pricerite"
13 item_attributes = { 'brand': "PriceRite" }
14 allowed_domains = ["priceritesupermarkets.com"]
15
16 start_urls = (
17 "https://www.priceritesupermarkets.com/locations/",
18 )
19
20 def parse(self, response):
21 script = response.xpath('//script[contains(text(), "var stores")]').extract_first()
22 stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0])
23
24 for store in stores:
25 properties = {
26 "ref": store["storeNumber"],
27 "name": store["name"],
28 "lat": store["latitude"],
29 "lon": store["longitude"],
30 "addr_full": store["address1"],
31 "city": store["city"],
32 "state": store["state"],
33 "postcode": store["zipCode"],
34 }
35
36 yield GeojsonPointItem(**properties)
37
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/pricerite.py b/locations/spiders/pricerite.py
--- a/locations/spiders/pricerite.py
+++ b/locations/spiders/pricerite.py
@@ -5,32 +5,36 @@
import scrapy
from locations.items import GeojsonPointItem
-from locations.hours import OpeningHours
class PriceRiteSpider(scrapy.Spider):
name = "pricerite"
item_attributes = { 'brand': "PriceRite" }
- allowed_domains = ["priceritesupermarkets.com"]
+ allowed_domains = ["priceritemarketplace.com"]
start_urls = (
- "https://www.priceritesupermarkets.com/locations/",
+ "https://www.priceritemarketplace.com/",
)
def parse(self, response):
- script = response.xpath('//script[contains(text(), "var stores")]').extract_first()
- stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0])
+ script = response.xpath('//script[contains(text(), "__PRELOADED_STATE__")]/text()').extract_first()
+ script = script[script.index('{'):]
+ stores = json.loads(script)['stores']['availablePlanningStores']['items']
for store in stores:
+ ref = store["retailerStoreId"]
properties = {
- "ref": store["storeNumber"],
+ "ref": ref,
+ "website": f"https://www.priceritemarketplace.com/sm/planning/rsid/{ref}",
"name": store["name"],
- "lat": store["latitude"],
- "lon": store["longitude"],
- "addr_full": store["address1"],
+ "lat": store["location"]["latitude"],
+ "lon": store["location"]["longitude"],
+ "addr_full": store["addressLine1"],
"city": store["city"],
- "state": store["state"],
- "postcode": store["zipCode"],
+ "state": store["countyProvinceState"],
+ "postcode": store["postCode"],
+ "phone": store["phone"],
+ "opening_hours": store["openingHours"],
}
yield GeojsonPointItem(**properties)
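
For illustration only: the core of the new parsing step in isolation, mirroring the expressions in the patched spider. The inline sample payload is invented; the real page embeds a much larger `__PRELOADED_STATE__` object.

```python
# Minimal sketch of the new extraction logic; the sample payload is made up.
import json

script = 'window.__PRELOADED_STATE__ = {"stores": {"availablePlanningStores": {"items": []}}}'
state = json.loads(script[script.index("{"):])
stores = state["stores"]["availablePlanningStores"]["items"]
print(len(stores))  # 0 for this toy payload
```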
|
{"golden_diff": "diff --git a/locations/spiders/pricerite.py b/locations/spiders/pricerite.py\n--- a/locations/spiders/pricerite.py\n+++ b/locations/spiders/pricerite.py\n@@ -5,32 +5,36 @@\n import scrapy\n \n from locations.items import GeojsonPointItem\n-from locations.hours import OpeningHours\n \n \n class PriceRiteSpider(scrapy.Spider):\n name = \"pricerite\"\n item_attributes = { 'brand': \"PriceRite\" }\n- allowed_domains = [\"priceritesupermarkets.com\"]\n+ allowed_domains = [\"priceritemarketplace.com\"]\n \n start_urls = (\n- \"https://www.priceritesupermarkets.com/locations/\",\n+ \"https://www.priceritemarketplace.com/\",\n )\n \n def parse(self, response):\n- script = response.xpath('//script[contains(text(), \"var stores\")]').extract_first()\n- stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0])\n+ script = response.xpath('//script[contains(text(), \"__PRELOADED_STATE__\")]/text()').extract_first()\n+ script = script[script.index('{'):]\n+ stores = json.loads(script)['stores']['availablePlanningStores']['items']\n \n for store in stores:\n+ ref = store[\"retailerStoreId\"]\n properties = {\n- \"ref\": store[\"storeNumber\"],\n+ \"ref\": ref,\n+ \"website\": f\"https://www.priceritemarketplace.com/sm/planning/rsid/{ref}\",\n \"name\": store[\"name\"],\n- \"lat\": store[\"latitude\"],\n- \"lon\": store[\"longitude\"],\n- \"addr_full\": store[\"address1\"],\n+ \"lat\": store[\"location\"][\"latitude\"],\n+ \"lon\": store[\"location\"][\"longitude\"],\n+ \"addr_full\": store[\"addressLine1\"],\n \"city\": store[\"city\"],\n- \"state\": store[\"state\"],\n- \"postcode\": store[\"zipCode\"],\n+ \"state\": store[\"countyProvinceState\"],\n+ \"postcode\": store[\"postCode\"],\n+ \"phone\": store[\"phone\"],\n+ \"opening_hours\": store[\"openingHours\"],\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider pricerite is broken\nDuring the global build at 2021-05-26-14-42-23, spider **pricerite** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/pricerite.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PriceRiteSpider(scrapy.Spider):\n name = \"pricerite\"\n item_attributes = { 'brand': \"PriceRite\" }\n allowed_domains = [\"priceritesupermarkets.com\"]\n\n start_urls = (\n \"https://www.priceritesupermarkets.com/locations/\",\n )\n\n def parse(self, response):\n script = response.xpath('//script[contains(text(), \"var stores\")]').extract_first()\n stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0])\n\n for store in stores:\n properties = {\n \"ref\": store[\"storeNumber\"],\n \"name\": store[\"name\"],\n \"lat\": store[\"latitude\"],\n \"lon\": store[\"longitude\"],\n \"addr_full\": store[\"address1\"],\n \"city\": store[\"city\"],\n \"state\": store[\"state\"],\n \"postcode\": store[\"zipCode\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n", "path": "locations/spiders/pricerite.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass 
PriceRiteSpider(scrapy.Spider):\n name = \"pricerite\"\n item_attributes = { 'brand': \"PriceRite\" }\n allowed_domains = [\"priceritemarketplace.com\"]\n\n start_urls = (\n \"https://www.priceritemarketplace.com/\",\n )\n\n def parse(self, response):\n script = response.xpath('//script[contains(text(), \"__PRELOADED_STATE__\")]/text()').extract_first()\n script = script[script.index('{'):]\n stores = json.loads(script)['stores']['availablePlanningStores']['items']\n\n for store in stores:\n ref = store[\"retailerStoreId\"]\n properties = {\n \"ref\": ref,\n \"website\": f\"https://www.priceritemarketplace.com/sm/planning/rsid/{ref}\",\n \"name\": store[\"name\"],\n \"lat\": store[\"location\"][\"latitude\"],\n \"lon\": store[\"location\"][\"longitude\"],\n \"addr_full\": store[\"addressLine1\"],\n \"city\": store[\"city\"],\n \"state\": store[\"countyProvinceState\"],\n \"postcode\": store[\"postCode\"],\n \"phone\": store[\"phone\"],\n \"opening_hours\": store[\"openingHours\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n", "path": "locations/spiders/pricerite.py"}]}
| 758 | 489 |
gh_patches_debug_30432
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-2330
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can MultitaskWrapper.clone() have a postfix or prefix arg?
## 🚀 Feature
A `prefix` and `postfix` parameter to pass into the `clone()` method of `MultitaskWrapper`
### Motivation
I have fabricated a `MultitaskWrapper` to measure multiple task metrics for training, and wanted to clone it for validation and test.
```
self.metrics = MultitaskWrapper({
'Img': MetricCollection({
'PSNR': PeakSignalNoiseRatio(data_range=1.0),
'SSIM': StructuralSimilarityIndexMeasure(data_range=1.0),
}),
'DoP': MetricCollection({`some_metrics`: SomeMetrics()}),
'AoP': MetricCollection({`other_metrics`: OtherMetrics()})
})
```
What I want to do is:
`self.val_metrics = self.metrics.clone(postfix='_val')` so that we can obtain
```
self.val_metrics = MultitaskWrapper({
'Img_val': MetricCollection({
'PSNR': PeakSignalNoiseRatio(data_range=1.0),
'SSIM': StructuralSimilarityIndexMeasure(data_range=1.0),
}),
'DoP_val': MetricCollection({`some_metrics`: SomeMetrics()}),
'AoP_val': MetricCollection({`other_metrics`: OtherMetrics()})
})
```
However, there is no such a postfix arg for `clone()`. So I can only re-define a `MultitaskWrapper` for `self.val_metrics` instead of simply cloning from `self.metrics`.
Is there any way to handle it?
Thx.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/wrappers/multitask.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # this is just a bypass for this module name collision with built-in one
15 from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
16
17 from torch import Tensor, nn
18
19 from torchmetrics.collections import MetricCollection
20 from torchmetrics.metric import Metric
21 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
22 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
23 from torchmetrics.wrappers.abstract import WrapperMetric
24
25 if not _MATPLOTLIB_AVAILABLE:
26 __doctest_skip__ = ["MultitaskWrapper.plot"]
27
28
29 class MultitaskWrapper(WrapperMetric):
30 """Wrapper class for computing different metrics on different tasks in the context of multitask learning.
31
32 In multitask learning the different tasks requires different metrics to be evaluated. This wrapper allows
33 for easy evaluation in such cases by supporting multiple predictions and targets through a dictionary.
34 Note that only metrics where the signature of `update` follows the standard `preds, target` is supported.
35
36 Args:
37 task_metrics:
38 Dictionary associating each task to a Metric or a MetricCollection. The keys of the dictionary represent the
39 names of the tasks, and the values represent the metrics to use for each task.
40
41 Raises:
42 TypeError:
43 If argument `task_metrics` is not an dictionary
44 TypeError:
45 If not all values in the `task_metrics` dictionary is instances of `Metric` or `MetricCollection`
46
47 Example (with a single metric per class):
48 >>> import torch
49 >>> from torchmetrics.wrappers import MultitaskWrapper
50 >>> from torchmetrics.regression import MeanSquaredError
51 >>> from torchmetrics.classification import BinaryAccuracy
52 >>>
53 >>> classification_target = torch.tensor([0, 1, 0])
54 >>> regression_target = torch.tensor([2.5, 5.0, 4.0])
55 >>> targets = {"Classification": classification_target, "Regression": regression_target}
56 >>>
57 >>> classification_preds = torch.tensor([0, 0, 1])
58 >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
59 >>> preds = {"Classification": classification_preds, "Regression": regression_preds}
60 >>>
61 >>> metrics = MultitaskWrapper({
62 ... "Classification": BinaryAccuracy(),
63 ... "Regression": MeanSquaredError()
64 ... })
65 >>> metrics.update(preds, targets)
66 >>> metrics.compute()
67 {'Classification': tensor(0.3333), 'Regression': tensor(0.8333)}
68
69 Example (with several metrics per task):
70 >>> import torch
71 >>> from torchmetrics import MetricCollection
72 >>> from torchmetrics.wrappers import MultitaskWrapper
73 >>> from torchmetrics.regression import MeanSquaredError, MeanAbsoluteError
74 >>> from torchmetrics.classification import BinaryAccuracy, BinaryF1Score
75 >>>
76 >>> classification_target = torch.tensor([0, 1, 0])
77 >>> regression_target = torch.tensor([2.5, 5.0, 4.0])
78 >>> targets = {"Classification": classification_target, "Regression": regression_target}
79 >>>
80 >>> classification_preds = torch.tensor([0, 0, 1])
81 >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
82 >>> preds = {"Classification": classification_preds, "Regression": regression_preds}
83 >>>
84 >>> metrics = MultitaskWrapper({
85 ... "Classification": MetricCollection(BinaryAccuracy(), BinaryF1Score()),
86 ... "Regression": MetricCollection(MeanSquaredError(), MeanAbsoluteError())
87 ... })
88 >>> metrics.update(preds, targets)
89 >>> metrics.compute()
90 {'Classification': {'BinaryAccuracy': tensor(0.3333), 'BinaryF1Score': tensor(0.)},
91 'Regression': {'MeanSquaredError': tensor(0.8333), 'MeanAbsoluteError': tensor(0.6667)}}
92
93 """
94
95 is_differentiable = False
96
97 def __init__(
98 self,
99 task_metrics: Dict[str, Union[Metric, MetricCollection]],
100 ) -> None:
101 self._check_task_metrics_type(task_metrics)
102 super().__init__()
103 self.task_metrics = nn.ModuleDict(task_metrics)
104
105 def items(self) -> Iterable[Tuple[str, nn.Module]]:
106 """Iterate over task and task metrics."""
107 return self.task_metrics.items()
108
109 def keys(self) -> Iterable[str]:
110 """Iterate over task names."""
111 return self.task_metrics.keys()
112
113 def values(self) -> Iterable[nn.Module]:
114 """Iterate over task metrics."""
115 return self.task_metrics.values()
116
117 @staticmethod
118 def _check_task_metrics_type(task_metrics: Dict[str, Union[Metric, MetricCollection]]) -> None:
119 if not isinstance(task_metrics, dict):
120 raise TypeError(f"Expected argument `task_metrics` to be a dict. Found task_metrics = {task_metrics}")
121
122 for metric in task_metrics.values():
123 if not (isinstance(metric, (Metric, MetricCollection))):
124 raise TypeError(
125 "Expected each task's metric to be a Metric or a MetricCollection. "
126 f"Found a metric of type {type(metric)}"
127 )
128
129 def update(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> None:
130 """Update each task's metric with its corresponding pred and target.
131
132 Args:
133 task_preds: Dictionary associating each task to a Tensor of pred.
134 task_targets: Dictionary associating each task to a Tensor of target.
135
136 """
137 if not self.task_metrics.keys() == task_preds.keys() == task_targets.keys():
138 raise ValueError(
139 "Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`"
140 f". Found task_preds.keys() = {task_preds.keys()}, task_targets.keys() = {task_targets.keys()} "
141 f"and self.task_metrics.keys() = {self.task_metrics.keys()}"
142 )
143
144 for task_name, metric in self.task_metrics.items():
145 pred = task_preds[task_name]
146 target = task_targets[task_name]
147 metric.update(pred, target)
148
149 def compute(self) -> Dict[str, Any]:
150 """Compute metrics for all tasks."""
151 return {task_name: metric.compute() for task_name, metric in self.task_metrics.items()}
152
153 def forward(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> Dict[str, Any]:
154 """Call underlying forward methods for all tasks and return the result as a dictionary."""
155 # This method is overridden because we do not need the complex version defined in Metric, that relies on the
156 # value of full_state_update, and that also accumulates the results. Here, all computations are handled by the
157 # underlying metrics, which all have their own value of full_state_update, and which all accumulate the results
158 # by themselves.
159 return {
160 task_name: metric(task_preds[task_name], task_targets[task_name])
161 for task_name, metric in self.task_metrics.items()
162 }
163
164 def reset(self) -> None:
165 """Reset all underlying metrics."""
166 for metric in self.task_metrics.values():
167 metric.reset()
168 super().reset()
169
170 def plot(
171 self, val: Optional[Union[Dict, Sequence[Dict]]] = None, axes: Optional[Sequence[_AX_TYPE]] = None
172 ) -> Sequence[_PLOT_OUT_TYPE]:
173 """Plot a single or multiple values from the metric.
174
175 All tasks' results are plotted on individual axes.
176
177 Args:
178 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
179 If no value is provided, will automatically call `metric.compute` and plot that result.
180 axes: Sequence of matplotlib axis objects. If provided, will add the plots to the provided axis objects.
181 If not provided, will create them.
182
183 Returns:
184 Sequence of tuples with Figure and Axes object for each task.
185
186 .. plot::
187 :scale: 75
188
189 >>> # Example plotting a single value
190 >>> import torch
191 >>> from torchmetrics.wrappers import MultitaskWrapper
192 >>> from torchmetrics.regression import MeanSquaredError
193 >>> from torchmetrics.classification import BinaryAccuracy
194 >>>
195 >>> classification_target = torch.tensor([0, 1, 0])
196 >>> regression_target = torch.tensor([2.5, 5.0, 4.0])
197 >>> targets = {"Classification": classification_target, "Regression": regression_target}
198 >>>
199 >>> classification_preds = torch.tensor([0, 0, 1])
200 >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
201 >>> preds = {"Classification": classification_preds, "Regression": regression_preds}
202 >>>
203 >>> metrics = MultitaskWrapper({
204 ... "Classification": BinaryAccuracy(),
205 ... "Regression": MeanSquaredError()
206 ... })
207 >>> metrics.update(preds, targets)
208 >>> value = metrics.compute()
209 >>> fig_, ax_ = metrics.plot(value)
210
211 .. plot::
212 :scale: 75
213
214 >>> # Example plotting multiple values
215 >>> import torch
216 >>> from torchmetrics.wrappers import MultitaskWrapper
217 >>> from torchmetrics.regression import MeanSquaredError
218 >>> from torchmetrics.classification import BinaryAccuracy
219 >>>
220 >>> classification_target = torch.tensor([0, 1, 0])
221 >>> regression_target = torch.tensor([2.5, 5.0, 4.0])
222 >>> targets = {"Classification": classification_target, "Regression": regression_target}
223 >>>
224 >>> classification_preds = torch.tensor([0, 0, 1])
225 >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
226 >>> preds = {"Classification": classification_preds, "Regression": regression_preds}
227 >>>
228 >>> metrics = MultitaskWrapper({
229 ... "Classification": BinaryAccuracy(),
230 ... "Regression": MeanSquaredError()
231 ... })
232 >>> values = []
233 >>> for _ in range(10):
234 ... values.append(metrics(preds, targets))
235 >>> fig_, ax_ = metrics.plot(values)
236
237 """
238 if axes is not None:
239 if not isinstance(axes, Sequence):
240 raise TypeError(f"Expected argument `axes` to be a Sequence. Found type(axes) = {type(axes)}")
241
242 if not all(isinstance(ax, _AX_TYPE) for ax in axes):
243 raise TypeError("Expected each ax in argument `axes` to be a matplotlib axis object")
244
245 if len(axes) != len(self.task_metrics):
246 raise ValueError(
247 "Expected argument `axes` to be a Sequence of the same length as the number of tasks."
248 f"Found len(axes) = {len(axes)} and {len(self.task_metrics)} tasks"
249 )
250
251 val = val if val is not None else self.compute()
252 fig_axs = []
253 for i, (task_name, task_metric) in enumerate(self.task_metrics.items()):
254 ax = axes[i] if axes is not None else None
255 if isinstance(val, Dict):
256 f, a = task_metric.plot(val[task_name], ax=ax)
257 elif isinstance(val, Sequence):
258 f, a = task_metric.plot([v[task_name] for v in val], ax=ax)
259 else:
260 raise TypeError(
261 "Expected argument `val` to be None or of type Dict or Sequence[Dict]. "
262 f"Found type(val)= {type(val)}"
263 )
264 fig_axs.append((f, a))
265 return fig_axs
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/torchmetrics/wrappers/multitask.py b/src/torchmetrics/wrappers/multitask.py
--- a/src/torchmetrics/wrappers/multitask.py
+++ b/src/torchmetrics/wrappers/multitask.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# this is just a bypass for this module name collision with built-in one
+from copy import deepcopy
from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
from torch import Tensor, nn
@@ -167,6 +168,33 @@
metric.reset()
super().reset()
+ @staticmethod
+ def _check_arg(arg: Optional[str], name: str) -> Optional[str]:
+ if arg is None or isinstance(arg, str):
+ return arg
+ raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}")
+
+ def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MultitaskWrapper":
+ """Make a copy of the metric.
+
+ Args:
+ prefix: a string to append in front of the metric keys
+ postfix: a string to append after the keys of the output dict.
+
+ """
+ multitask_copy = deepcopy(self)
+ if prefix is not None:
+ prefix = self._check_arg(prefix, "prefix")
+ multitask_copy.task_metrics = nn.ModuleDict(
+ {prefix + key: value for key, value in multitask_copy.task_metrics.items()}
+ )
+ if postfix is not None:
+ postfix = self._check_arg(postfix, "postfix")
+ multitask_copy.task_metrics = nn.ModuleDict(
+ {key + postfix: value for key, value in multitask_copy.task_metrics.items()}
+ )
+ return multitask_copy
+
def plot(
self, val: Optional[Union[Dict, Sequence[Dict]]] = None, axes: Optional[Sequence[_AX_TYPE]] = None
) -> Sequence[_PLOT_OUT_TYPE]:
|
{"golden_diff": "diff --git a/src/torchmetrics/wrappers/multitask.py b/src/torchmetrics/wrappers/multitask.py\n--- a/src/torchmetrics/wrappers/multitask.py\n+++ b/src/torchmetrics/wrappers/multitask.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n # this is just a bypass for this module name collision with built-in one\n+from copy import deepcopy\n from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union\n \n from torch import Tensor, nn\n@@ -167,6 +168,33 @@\n metric.reset()\n super().reset()\n \n+ @staticmethod\n+ def _check_arg(arg: Optional[str], name: str) -> Optional[str]:\n+ if arg is None or isinstance(arg, str):\n+ return arg\n+ raise ValueError(f\"Expected input `{name}` to be a string, but got {type(arg)}\")\n+\n+ def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> \"MultitaskWrapper\":\n+ \"\"\"Make a copy of the metric.\n+\n+ Args:\n+ prefix: a string to append in front of the metric keys\n+ postfix: a string to append after the keys of the output dict.\n+\n+ \"\"\"\n+ multitask_copy = deepcopy(self)\n+ if prefix is not None:\n+ prefix = self._check_arg(prefix, \"prefix\")\n+ multitask_copy.task_metrics = nn.ModuleDict(\n+ {prefix + key: value for key, value in multitask_copy.task_metrics.items()}\n+ )\n+ if postfix is not None:\n+ postfix = self._check_arg(postfix, \"postfix\")\n+ multitask_copy.task_metrics = nn.ModuleDict(\n+ {key + postfix: value for key, value in multitask_copy.task_metrics.items()}\n+ )\n+ return multitask_copy\n+\n def plot(\n self, val: Optional[Union[Dict, Sequence[Dict]]] = None, axes: Optional[Sequence[_AX_TYPE]] = None\n ) -> Sequence[_PLOT_OUT_TYPE]:\n", "issue": "Can MultitaskWrapper.clone() have a postfix or prefix arg?\n## \ud83d\ude80 Feature\r\nA `prefix` and `postfix` parameter to pass into the `clone()` method of `MultitaskWrapper`\r\n\r\n### Motivation\r\n\r\nI have fabricated a `MultitaskWrapper` to measure multiple task metrics for trainning, and wanted to clone it for validation and test.\r\n```\r\nself.metrics = MultitaskWrapper({\r\n 'Img': MetricCollection({\r\n 'PSNR': PeakSignalNoiseRatio(data_range=1.0),\r\n 'SSIM': StructuralSimilarityIndexMeasure(data_range=1.0),\r\n }),\r\n 'DoP': MetricCollection({`some_metrics`: SomeMetrics()}),\r\n 'AoP': MetricCollection({`other_metrics`: OtherMetrics()})\r\n })\r\n```\r\nWhat I want to do is:\r\n`self.val_metrics = self.metrics.clone(postfix='_val')` so that we can obtain\r\n```\r\nself.val_metrics = MultitaskWrapper({\r\n 'Img_val': MetricCollection({\r\n 'PSNR': PeakSignalNoiseRatio(data_range=1.0),\r\n 'SSIM': StructuralSimilarityIndexMeasure(data_range=1.0),\r\n }),\r\n 'DoP_val': MetricCollection({`some_metrics`: SomeMetrics()}),\r\n 'AoP_val': MetricCollection({`other_metrics`: OtherMetrics()})\r\n })\r\n```\r\nHowever, there is no such a postfix arg for `clone()`. 
So I can only re-define a `MultitaskWrapper` for `self.val_metrics` instead of simply cloning form `self.metrics`.\r\n\r\nIs there any way to handle it?\r\nThx.\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# this is just a bypass for this module name collision with built-in one\nfrom typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union\n\nfrom torch import Tensor, nn\n\nfrom torchmetrics.collections import MetricCollection\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\nfrom torchmetrics.wrappers.abstract import WrapperMetric\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"MultitaskWrapper.plot\"]\n\n\nclass MultitaskWrapper(WrapperMetric):\n \"\"\"Wrapper class for computing different metrics on different tasks in the context of multitask learning.\n\n In multitask learning the different tasks requires different metrics to be evaluated. This wrapper allows\n for easy evaluation in such cases by supporting multiple predictions and targets through a dictionary.\n Note that only metrics where the signature of `update` follows the standard `preds, target` is supported.\n\n Args:\n task_metrics:\n Dictionary associating each task to a Metric or a MetricCollection. The keys of the dictionary represent the\n names of the tasks, and the values represent the metrics to use for each task.\n\n Raises:\n TypeError:\n If argument `task_metrics` is not an dictionary\n TypeError:\n If not all values in the `task_metrics` dictionary is instances of `Metric` or `MetricCollection`\n\n Example (with a single metric per class):\n >>> import torch\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import BinaryAccuracy\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": BinaryAccuracy(),\n ... \"Regression\": MeanSquaredError()\n ... 
})\n >>> metrics.update(preds, targets)\n >>> metrics.compute()\n {'Classification': tensor(0.3333), 'Regression': tensor(0.8333)}\n\n Example (with several metrics per task):\n >>> import torch\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError, MeanAbsoluteError\n >>> from torchmetrics.classification import BinaryAccuracy, BinaryF1Score\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": MetricCollection(BinaryAccuracy(), BinaryF1Score()),\n ... \"Regression\": MetricCollection(MeanSquaredError(), MeanAbsoluteError())\n ... })\n >>> metrics.update(preds, targets)\n >>> metrics.compute()\n {'Classification': {'BinaryAccuracy': tensor(0.3333), 'BinaryF1Score': tensor(0.)},\n 'Regression': {'MeanSquaredError': tensor(0.8333), 'MeanAbsoluteError': tensor(0.6667)}}\n\n \"\"\"\n\n is_differentiable = False\n\n def __init__(\n self,\n task_metrics: Dict[str, Union[Metric, MetricCollection]],\n ) -> None:\n self._check_task_metrics_type(task_metrics)\n super().__init__()\n self.task_metrics = nn.ModuleDict(task_metrics)\n\n def items(self) -> Iterable[Tuple[str, nn.Module]]:\n \"\"\"Iterate over task and task metrics.\"\"\"\n return self.task_metrics.items()\n\n def keys(self) -> Iterable[str]:\n \"\"\"Iterate over task names.\"\"\"\n return self.task_metrics.keys()\n\n def values(self) -> Iterable[nn.Module]:\n \"\"\"Iterate over task metrics.\"\"\"\n return self.task_metrics.values()\n\n @staticmethod\n def _check_task_metrics_type(task_metrics: Dict[str, Union[Metric, MetricCollection]]) -> None:\n if not isinstance(task_metrics, dict):\n raise TypeError(f\"Expected argument `task_metrics` to be a dict. Found task_metrics = {task_metrics}\")\n\n for metric in task_metrics.values():\n if not (isinstance(metric, (Metric, MetricCollection))):\n raise TypeError(\n \"Expected each task's metric to be a Metric or a MetricCollection. \"\n f\"Found a metric of type {type(metric)}\"\n )\n\n def update(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> None:\n \"\"\"Update each task's metric with its corresponding pred and target.\n\n Args:\n task_preds: Dictionary associating each task to a Tensor of pred.\n task_targets: Dictionary associating each task to a Tensor of target.\n\n \"\"\"\n if not self.task_metrics.keys() == task_preds.keys() == task_targets.keys():\n raise ValueError(\n \"Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`\"\n f\". 
Found task_preds.keys() = {task_preds.keys()}, task_targets.keys() = {task_targets.keys()} \"\n f\"and self.task_metrics.keys() = {self.task_metrics.keys()}\"\n )\n\n for task_name, metric in self.task_metrics.items():\n pred = task_preds[task_name]\n target = task_targets[task_name]\n metric.update(pred, target)\n\n def compute(self) -> Dict[str, Any]:\n \"\"\"Compute metrics for all tasks.\"\"\"\n return {task_name: metric.compute() for task_name, metric in self.task_metrics.items()}\n\n def forward(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> Dict[str, Any]:\n \"\"\"Call underlying forward methods for all tasks and return the result as a dictionary.\"\"\"\n # This method is overridden because we do not need the complex version defined in Metric, that relies on the\n # value of full_state_update, and that also accumulates the results. Here, all computations are handled by the\n # underlying metrics, which all have their own value of full_state_update, and which all accumulate the results\n # by themselves.\n return {\n task_name: metric(task_preds[task_name], task_targets[task_name])\n for task_name, metric in self.task_metrics.items()\n }\n\n def reset(self) -> None:\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.task_metrics.values():\n metric.reset()\n super().reset()\n\n def plot(\n self, val: Optional[Union[Dict, Sequence[Dict]]] = None, axes: Optional[Sequence[_AX_TYPE]] = None\n ) -> Sequence[_PLOT_OUT_TYPE]:\n \"\"\"Plot a single or multiple values from the metric.\n\n All tasks' results are plotted on individual axes.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n axes: Sequence of matplotlib axis objects. If provided, will add the plots to the provided axis objects.\n If not provided, will create them.\n\n Returns:\n Sequence of tuples with Figure and Axes object for each task.\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import BinaryAccuracy\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": BinaryAccuracy(),\n ... \"Regression\": MeanSquaredError()\n ... })\n >>> metrics.update(preds, targets)\n >>> value = metrics.compute()\n >>> fig_, ax_ = metrics.plot(value)\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import BinaryAccuracy\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": BinaryAccuracy(),\n ... \"Regression\": MeanSquaredError()\n ... })\n >>> values = []\n >>> for _ in range(10):\n ... values.append(metrics(preds, targets))\n >>> fig_, ax_ = metrics.plot(values)\n\n \"\"\"\n if axes is not None:\n if not isinstance(axes, Sequence):\n raise TypeError(f\"Expected argument `axes` to be a Sequence. Found type(axes) = {type(axes)}\")\n\n if not all(isinstance(ax, _AX_TYPE) for ax in axes):\n raise TypeError(\"Expected each ax in argument `axes` to be a matplotlib axis object\")\n\n if len(axes) != len(self.task_metrics):\n raise ValueError(\n \"Expected argument `axes` to be a Sequence of the same length as the number of tasks.\"\n f\"Found len(axes) = {len(axes)} and {len(self.task_metrics)} tasks\"\n )\n\n val = val if val is not None else self.compute()\n fig_axs = []\n for i, (task_name, task_metric) in enumerate(self.task_metrics.items()):\n ax = axes[i] if axes is not None else None\n if isinstance(val, Dict):\n f, a = task_metric.plot(val[task_name], ax=ax)\n elif isinstance(val, Sequence):\n f, a = task_metric.plot([v[task_name] for v in val], ax=ax)\n else:\n raise TypeError(\n \"Expected argument `val` to be None or of type Dict or Sequence[Dict]. \"\n f\"Found type(val)= {type(val)}\"\n )\n fig_axs.append((f, a))\n return fig_axs\n", "path": "src/torchmetrics/wrappers/multitask.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# this is just a bypass for this module name collision with built-in one\nfrom copy import deepcopy\nfrom typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union\n\nfrom torch import Tensor, nn\n\nfrom torchmetrics.collections import MetricCollection\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\nfrom torchmetrics.wrappers.abstract import WrapperMetric\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"MultitaskWrapper.plot\"]\n\n\nclass MultitaskWrapper(WrapperMetric):\n \"\"\"Wrapper class for computing different metrics on different tasks in the context of multitask learning.\n\n In multitask learning the different tasks requires different metrics to be evaluated. 
This wrapper allows\n for easy evaluation in such cases by supporting multiple predictions and targets through a dictionary.\n Note that only metrics where the signature of `update` follows the standard `preds, target` is supported.\n\n Args:\n task_metrics:\n Dictionary associating each task to a Metric or a MetricCollection. The keys of the dictionary represent the\n names of the tasks, and the values represent the metrics to use for each task.\n\n Raises:\n TypeError:\n If argument `task_metrics` is not an dictionary\n TypeError:\n If not all values in the `task_metrics` dictionary is instances of `Metric` or `MetricCollection`\n\n Example (with a single metric per class):\n >>> import torch\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import BinaryAccuracy\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": BinaryAccuracy(),\n ... \"Regression\": MeanSquaredError()\n ... })\n >>> metrics.update(preds, targets)\n >>> metrics.compute()\n {'Classification': tensor(0.3333), 'Regression': tensor(0.8333)}\n\n Example (with several metrics per task):\n >>> import torch\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError, MeanAbsoluteError\n >>> from torchmetrics.classification import BinaryAccuracy, BinaryF1Score\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": MetricCollection(BinaryAccuracy(), BinaryF1Score()),\n ... \"Regression\": MetricCollection(MeanSquaredError(), MeanAbsoluteError())\n ... 
})\n >>> metrics.update(preds, targets)\n >>> metrics.compute()\n {'Classification': {'BinaryAccuracy': tensor(0.3333), 'BinaryF1Score': tensor(0.)},\n 'Regression': {'MeanSquaredError': tensor(0.8333), 'MeanAbsoluteError': tensor(0.6667)}}\n\n \"\"\"\n\n is_differentiable = False\n\n def __init__(\n self,\n task_metrics: Dict[str, Union[Metric, MetricCollection]],\n ) -> None:\n self._check_task_metrics_type(task_metrics)\n super().__init__()\n self.task_metrics = nn.ModuleDict(task_metrics)\n\n def items(self) -> Iterable[Tuple[str, nn.Module]]:\n \"\"\"Iterate over task and task metrics.\"\"\"\n return self.task_metrics.items()\n\n def keys(self) -> Iterable[str]:\n \"\"\"Iterate over task names.\"\"\"\n return self.task_metrics.keys()\n\n def values(self) -> Iterable[nn.Module]:\n \"\"\"Iterate over task metrics.\"\"\"\n return self.task_metrics.values()\n\n @staticmethod\n def _check_task_metrics_type(task_metrics: Dict[str, Union[Metric, MetricCollection]]) -> None:\n if not isinstance(task_metrics, dict):\n raise TypeError(f\"Expected argument `task_metrics` to be a dict. Found task_metrics = {task_metrics}\")\n\n for metric in task_metrics.values():\n if not (isinstance(metric, (Metric, MetricCollection))):\n raise TypeError(\n \"Expected each task's metric to be a Metric or a MetricCollection. \"\n f\"Found a metric of type {type(metric)}\"\n )\n\n def update(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> None:\n \"\"\"Update each task's metric with its corresponding pred and target.\n\n Args:\n task_preds: Dictionary associating each task to a Tensor of pred.\n task_targets: Dictionary associating each task to a Tensor of target.\n\n \"\"\"\n if not self.task_metrics.keys() == task_preds.keys() == task_targets.keys():\n raise ValueError(\n \"Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`\"\n f\". Found task_preds.keys() = {task_preds.keys()}, task_targets.keys() = {task_targets.keys()} \"\n f\"and self.task_metrics.keys() = {self.task_metrics.keys()}\"\n )\n\n for task_name, metric in self.task_metrics.items():\n pred = task_preds[task_name]\n target = task_targets[task_name]\n metric.update(pred, target)\n\n def compute(self) -> Dict[str, Any]:\n \"\"\"Compute metrics for all tasks.\"\"\"\n return {task_name: metric.compute() for task_name, metric in self.task_metrics.items()}\n\n def forward(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> Dict[str, Any]:\n \"\"\"Call underlying forward methods for all tasks and return the result as a dictionary.\"\"\"\n # This method is overridden because we do not need the complex version defined in Metric, that relies on the\n # value of full_state_update, and that also accumulates the results. 
Here, all computations are handled by the\n # underlying metrics, which all have their own value of full_state_update, and which all accumulate the results\n # by themselves.\n return {\n task_name: metric(task_preds[task_name], task_targets[task_name])\n for task_name, metric in self.task_metrics.items()\n }\n\n def reset(self) -> None:\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.task_metrics.values():\n metric.reset()\n super().reset()\n\n @staticmethod\n def _check_arg(arg: Optional[str], name: str) -> Optional[str]:\n if arg is None or isinstance(arg, str):\n return arg\n raise ValueError(f\"Expected input `{name}` to be a string, but got {type(arg)}\")\n\n def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> \"MultitaskWrapper\":\n \"\"\"Make a copy of the metric.\n\n Args:\n prefix: a string to append in front of the metric keys\n postfix: a string to append after the keys of the output dict.\n\n \"\"\"\n multitask_copy = deepcopy(self)\n if prefix is not None:\n prefix = self._check_arg(prefix, \"prefix\")\n multitask_copy.task_metrics = nn.ModuleDict(\n {prefix + key: value for key, value in multitask_copy.task_metrics.items()}\n )\n if postfix is not None:\n postfix = self._check_arg(postfix, \"postfix\")\n multitask_copy.task_metrics = nn.ModuleDict(\n {key + postfix: value for key, value in multitask_copy.task_metrics.items()}\n )\n return multitask_copy\n\n def plot(\n self, val: Optional[Union[Dict, Sequence[Dict]]] = None, axes: Optional[Sequence[_AX_TYPE]] = None\n ) -> Sequence[_PLOT_OUT_TYPE]:\n \"\"\"Plot a single or multiple values from the metric.\n\n All tasks' results are plotted on individual axes.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n axes: Sequence of matplotlib axis objects. If provided, will add the plots to the provided axis objects.\n If not provided, will create them.\n\n Returns:\n Sequence of tuples with Figure and Axes object for each task.\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import BinaryAccuracy\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": BinaryAccuracy(),\n ... \"Regression\": MeanSquaredError()\n ... })\n >>> metrics.update(preds, targets)\n >>> value = metrics.compute()\n >>> fig_, ax_ = metrics.plot(value)\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.wrappers import MultitaskWrapper\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import BinaryAccuracy\n >>>\n >>> classification_target = torch.tensor([0, 1, 0])\n >>> regression_target = torch.tensor([2.5, 5.0, 4.0])\n >>> targets = {\"Classification\": classification_target, \"Regression\": regression_target}\n >>>\n >>> classification_preds = torch.tensor([0, 0, 1])\n >>> regression_preds = torch.tensor([3.0, 5.0, 2.5])\n >>> preds = {\"Classification\": classification_preds, \"Regression\": regression_preds}\n >>>\n >>> metrics = MultitaskWrapper({\n ... \"Classification\": BinaryAccuracy(),\n ... \"Regression\": MeanSquaredError()\n ... })\n >>> values = []\n >>> for _ in range(10):\n ... values.append(metrics(preds, targets))\n >>> fig_, ax_ = metrics.plot(values)\n\n \"\"\"\n if axes is not None:\n if not isinstance(axes, Sequence):\n raise TypeError(f\"Expected argument `axes` to be a Sequence. Found type(axes) = {type(axes)}\")\n\n if not all(isinstance(ax, _AX_TYPE) for ax in axes):\n raise TypeError(\"Expected each ax in argument `axes` to be a matplotlib axis object\")\n\n if len(axes) != len(self.task_metrics):\n raise ValueError(\n \"Expected argument `axes` to be a Sequence of the same length as the number of tasks.\"\n f\"Found len(axes) = {len(axes)} and {len(self.task_metrics)} tasks\"\n )\n\n val = val if val is not None else self.compute()\n fig_axs = []\n for i, (task_name, task_metric) in enumerate(self.task_metrics.items()):\n ax = axes[i] if axes is not None else None\n if isinstance(val, Dict):\n f, a = task_metric.plot(val[task_name], ax=ax)\n elif isinstance(val, Sequence):\n f, a = task_metric.plot([v[task_name] for v in val], ax=ax)\n else:\n raise TypeError(\n \"Expected argument `val` to be None or of type Dict or Sequence[Dict]. \"\n f\"Found type(val)= {type(val)}\"\n )\n fig_axs.append((f, a))\n return fig_axs\n", "path": "src/torchmetrics/wrappers/multitask.py"}]}
| 3,941 | 481 |
gh_patches_debug_59407
|
rasdani/github-patches
|
git_diff
|
dynaconf__dynaconf-570
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Dynaconf searches for a file in various directories even when the absolute path is provided
Dynaconf searches for a file in various directories even when the absolute path is provided.
It can cause an issue when the user performing the command does not have access to a directory Dynaconf is searching in.
To reproduce:
 - run any django-admin command from a directory the user performing the command does not have access to
- provide an absolute path to your configuration (`/etc/pulp/settings.py` in the example below)
E.g.
```
# pwd
/root/somedir
# sudo -u pulp stat /root/somedir
stat: cannot stat ‘/root/somedir’: Permission denied
# sudo -u pulp DJANGO_SETTINGS_MODULE=pulpcore.app.settings PULP_SETTINGS=/etc/pulp/settings.py pulpcore-manager
Traceback (most recent call last):
File "/bin/pulpcore-manager", line 11, in <module>
load_entry_point('pulpcore==3.7.4', 'console_scripts', 'pulpcore-manager')()
File "/usr/lib/python3.6/site-packages/pulpcore/app/manage.py", line 11, in manage
execute_from_command_line(sys.argv)
File "/usr/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/lib/python3.6/site-packages/django/core/management/__init__.py", line 325, in execute
settings.INSTALLED_APPS
File "/usr/lib/python3.6/site-packages/django/conf/__init__.py", line 79, in __getattr__
self._setup(name)
File "/usr/lib/python3.6/site-packages/django/conf/__init__.py", line 66, in _setup
self._wrapped = Settings(settings_module)
File "/usr/lib/python3.6/site-packages/django/conf/__init__.py", line 157, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "/usr/lib64/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/lib/python3.6/site-packages/pulpcore/app/settings.py", line 249, in <module>
load_dotenv=False,
File "/usr/lib/python3.6/site-packages/dynaconf/contrib/django_dynaconf_v2.py", line 76, in load
lazy_settings.populate_obj(django_settings_module)
File "/usr/lib/python3.6/site-packages/dynaconf/base.py", line 141, in __getattr__
self._setup()
File "/usr/lib/python3.6/site-packages/dynaconf/base.py", line 192, in _setup
settings_module=settings_module, **self._kwargs
File "/usr/lib/python3.6/site-packages/dynaconf/base.py", line 255, in __init__
self.execute_loaders()
File "/usr/lib/python3.6/site-packages/dynaconf/base.py", line 952, in execute_loaders
self, env=env, silent=silent, key=key, filename=filename
File "/usr/lib/python3.6/site-packages/dynaconf/loaders/__init__.py", line 92, in settings_loader
found = obj.find_file(item, project_root=p_root)
File "/usr/lib/python3.6/site-packages/dynaconf/base.py", line 1067, in find_file
return find_file(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/dynaconf/utils/files.py", line 60, in find_file
search_tree.extend(_walk_to_root(work_dir))
File "/usr/lib/python3.6/site-packages/dynaconf/utils/files.py", line 13, in _walk_to_root
raise IOError("Starting path not found")
```
FWIW, here is dynaconf usage in [Pulp's settings.py](https://github.com/pulp/pulpcore/blob/5d63fc9ad78dfb63c68a52c28f21703a90164b08/pulpcore/app/settings.py#L272-L281):
```
settings = dynaconf.DjangoDynaconf(
__name__,
GLOBAL_ENV_FOR_DYNACONF="PULP",
ENV_SWITCHER_FOR_DYNACONF="PULP_ENV",
PRELOAD_FOR_DYNACONF=[
"{}.app.settings".format(plugin_name) for plugin_name in INSTALLED_PULP_PLUGINS
],
ENVVAR_FOR_DYNACONF="PULP_SETTINGS",
load_dotenv=False,
)
```
Dynaconf should not search anywhere if the absolute path is provided.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/utils/files.py`
Content:
```
1 import inspect
2 import io
3 import os
4
5 from dynaconf.utils import deduplicate
6
7
8 def _walk_to_root(path, break_at=None):
9 """
10 Directories starting from the given directory up to the root or break_at
11 """
12 if not os.path.exists(path): # pragma: no cover
13 raise IOError("Starting path not found")
14
15 if os.path.isfile(path): # pragma: no cover
16 path = os.path.dirname(path)
17
18 last_dir = None
19 current_dir = os.path.abspath(path)
20 paths = []
21 while last_dir != current_dir:
22 paths.append(current_dir)
23 paths.append(os.path.join(current_dir, "config"))
24 if break_at and current_dir == os.path.abspath(break_at): # noqa
25 break
26 parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
27 last_dir, current_dir = current_dir, parent_dir
28 return paths
29
30
31 SEARCHTREE = []
32
33
34 def find_file(filename=".env", project_root=None, skip_files=None, **kwargs):
35 """Search in increasingly higher folders for the given file
36 Returns path to the file if found, or an empty string otherwise.
37
38 This function will build a `search_tree` based on:
39
40 - Project_root if specified
41 - Invoked script location and its parents until root
42 - Current working directory
43
44 For each path in the `search_tree` it will also look for an
45 aditional `./config` folder.
46 """
47 search_tree = []
48 work_dir = os.getcwd()
49 skip_files = skip_files or []
50
51 if project_root is not None:
52 search_tree.extend(_walk_to_root(project_root, break_at=work_dir))
53
54 script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))
55
56 # Path to invoked script and recursively to root with its ./config dirs
57 search_tree.extend(_walk_to_root(script_dir))
58
59 # Path to where Python interpreter was invoked and recursively to root
60 search_tree.extend(_walk_to_root(work_dir))
61
62 # Don't look the same place twice
63 search_tree = deduplicate(search_tree)
64
65 global SEARCHTREE
66 SEARCHTREE[:] = search_tree
67
68 for dirname in search_tree:
69 check_path = os.path.join(dirname, filename)
70 if check_path in skip_files:
71 continue
72 if os.path.exists(check_path):
73 return check_path # First found will return
74
75 # return empty string if not found so it can still be joined in os.path
76 return ""
77
78
79 def read_file(path, **kwargs):
80 content = ""
81 with io.open(path, **kwargs) as open_file:
82 content = open_file.read().strip()
83 return content
84
85
86 def get_local_filename(filename):
87 """Takes a filename like `settings.toml` and returns `settings.local.toml`
88
89 Arguments:
90 filename {str} -- The filename or complete path
91
92 Returns:
93 [str] -- The same name or path with `.local.` added.
94 """
95 name, _, extension = os.path.basename(str(filename)).rpartition(
96 os.path.extsep
97 )
98
99 return os.path.join(
100 os.path.dirname(str(filename)), f"{name}.local.{extension}"
101 )
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dynaconf/utils/files.py b/dynaconf/utils/files.py
--- a/dynaconf/utils/files.py
+++ b/dynaconf/utils/files.py
@@ -48,6 +48,12 @@
work_dir = os.getcwd()
skip_files = skip_files or []
+ # If filename is an absolute path and exists, just return it
+ # if the absolute path does not exist, return empty string so
+ # that it can be joined and avoid IoError
+ if os.path.isabs(filename):
+ return filename if os.path.exists(filename) else ""
+
if project_root is not None:
search_tree.extend(_walk_to_root(project_root, break_at=work_dir))
|
{"golden_diff": "diff --git a/dynaconf/utils/files.py b/dynaconf/utils/files.py\n--- a/dynaconf/utils/files.py\n+++ b/dynaconf/utils/files.py\n@@ -48,6 +48,12 @@\n work_dir = os.getcwd()\n skip_files = skip_files or []\n \n+ # If filename is an absolute path and exists, just return it\n+ # if the absolute path does not exist, return empty string so\n+ # that it can be joined and avoid IoError\n+ if os.path.isabs(filename):\n+ return filename if os.path.exists(filename) else \"\"\n+\n if project_root is not None:\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n", "issue": "[bug] Dynaconf searches for a file in various directories even when the absolute path is provided\nDynaconf searches for a file in various directories even when the absolute path is provided.\r\nIt can cause the issue when the user performing the command does not have access to a directory dynaconf is searching in.\r\n\r\nTo reproduce:\r\n - run any django-admin command from a directory where the user performing a command do not have access to\r\n - provide an absolute path to your configuration (`/etc/pulp/settings.py` in the example below)\r\n\r\nE.g.\r\n```\r\n# pwd\r\n/root/somedir\r\n# sudo -u pulp stat /root/somedir\r\nstat: cannot stat \u2018/root/somedir\u2019: Permission denied\r\n# sudo -u pulp DJANGO_SETTINGS_MODULE=pulpcore.app.settings PULP_SETTINGS=/etc/pulp/settings.py pulpcore-manager\r\n\r\nTraceback (most recent call last):\r\n File \"/bin/pulpcore-manager\", line 11, in <module>\r\n load_entry_point('pulpcore==3.7.4', 'console_scripts', 'pulpcore-manager')()\r\n File \"/usr/lib/python3.6/site-packages/pulpcore/app/manage.py\", line 11, in manage\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/lib/python3.6/site-packages/django/core/management/__init__.py\", line 381, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/lib/python3.6/site-packages/django/core/management/__init__.py\", line 325, in execute\r\n settings.INSTALLED_APPS\r\n File \"/usr/lib/python3.6/site-packages/django/conf/__init__.py\", line 79, in __getattr__\r\n self._setup(name)\r\n File \"/usr/lib/python3.6/site-packages/django/conf/__init__.py\", line 66, in _setup\r\n self._wrapped = Settings(settings_module)\r\n File \"/usr/lib/python3.6/site-packages/django/conf/__init__.py\", line 157, in __init__\r\n mod = importlib.import_module(self.SETTINGS_MODULE)\r\n File \"/usr/lib64/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/lib/python3.6/site-packages/pulpcore/app/settings.py\", line 249, in <module>\r\n load_dotenv=False,\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/contrib/django_dynaconf_v2.py\", line 76, in load\r\n lazy_settings.populate_obj(django_settings_module)\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/base.py\", line 141, in __getattr__\r\n self._setup()\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/base.py\", line 192, in _setup\r\n settings_module=settings_module, **self._kwargs\r\n File 
\"/usr/lib/python3.6/site-packages/dynaconf/base.py\", line 255, in __init__\r\n self.execute_loaders()\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/base.py\", line 952, in execute_loaders\r\n self, env=env, silent=silent, key=key, filename=filename\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/loaders/__init__.py\", line 92, in settings_loader\r\n found = obj.find_file(item, project_root=p_root)\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/base.py\", line 1067, in find_file\r\n return find_file(*args, **kwargs)\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/utils/files.py\", line 60, in find_file\r\n search_tree.extend(_walk_to_root(work_dir))\r\n File \"/usr/lib/python3.6/site-packages/dynaconf/utils/files.py\", line 13, in _walk_to_root\r\n raise IOError(\"Starting path not found\")\r\n```\r\n\r\nFWIW, here is dynaconf usage in [Pulp's settings.py](https://github.com/pulp/pulpcore/blob/5d63fc9ad78dfb63c68a52c28f21703a90164b08/pulpcore/app/settings.py#L272-L281):\r\n```\r\n settings = dynaconf.DjangoDynaconf(\r\n __name__,\r\n GLOBAL_ENV_FOR_DYNACONF=\"PULP\",\r\n ENV_SWITCHER_FOR_DYNACONF=\"PULP_ENV\",\r\n PRELOAD_FOR_DYNACONF=[\r\n \"{}.app.settings\".format(plugin_name) for plugin_name in INSTALLED_PULP_PLUGINS\r\n ],\r\n ENVVAR_FOR_DYNACONF=\"PULP_SETTINGS\",\r\n load_dotenv=False,\r\n)\r\n```\r\n\r\nDynaconf should not search anywhere if the absolute path is provided.\r\n\r\n\n", "before_files": [{"content": "import inspect\nimport io\nimport os\n\nfrom dynaconf.utils import deduplicate\n\n\ndef _walk_to_root(path, break_at=None):\n \"\"\"\n Directories starting from the given directory up to the root or break_at\n \"\"\"\n if not os.path.exists(path): # pragma: no cover\n raise IOError(\"Starting path not found\")\n\n if os.path.isfile(path): # pragma: no cover\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n paths = []\n while last_dir != current_dir:\n paths.append(current_dir)\n paths.append(os.path.join(current_dir, \"config\"))\n if break_at and current_dir == os.path.abspath(break_at): # noqa\n break\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n return paths\n\n\nSEARCHTREE = []\n\n\ndef find_file(filename=\".env\", project_root=None, skip_files=None, **kwargs):\n \"\"\"Search in increasingly higher folders for the given file\n Returns path to the file if found, or an empty string otherwise.\n\n This function will build a `search_tree` based on:\n\n - Project_root if specified\n - Invoked script location and its parents until root\n - Current working directory\n\n For each path in the `search_tree` it will also look for an\n aditional `./config` folder.\n \"\"\"\n search_tree = []\n work_dir = os.getcwd()\n skip_files = skip_files or []\n\n if project_root is not None:\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n\n script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))\n\n # Path to invoked script and recursively to root with its ./config dirs\n search_tree.extend(_walk_to_root(script_dir))\n\n # Path to where Python interpreter was invoked and recursively to root\n search_tree.extend(_walk_to_root(work_dir))\n\n # Don't look the same place twice\n search_tree = deduplicate(search_tree)\n\n global SEARCHTREE\n SEARCHTREE[:] = search_tree\n\n for dirname in search_tree:\n check_path = os.path.join(dirname, filename)\n if check_path in skip_files:\n continue\n if 
os.path.exists(check_path):\n return check_path # First found will return\n\n # return empty string if not found so it can still be joined in os.path\n return \"\"\n\n\ndef read_file(path, **kwargs):\n content = \"\"\n with io.open(path, **kwargs) as open_file:\n content = open_file.read().strip()\n return content\n\n\ndef get_local_filename(filename):\n \"\"\"Takes a filename like `settings.toml` and returns `settings.local.toml`\n\n Arguments:\n filename {str} -- The filename or complete path\n\n Returns:\n [str] -- The same name or path with `.local.` added.\n \"\"\"\n name, _, extension = os.path.basename(str(filename)).rpartition(\n os.path.extsep\n )\n\n return os.path.join(\n os.path.dirname(str(filename)), f\"{name}.local.{extension}\"\n )\n", "path": "dynaconf/utils/files.py"}], "after_files": [{"content": "import inspect\nimport io\nimport os\n\nfrom dynaconf.utils import deduplicate\n\n\ndef _walk_to_root(path, break_at=None):\n \"\"\"\n Directories starting from the given directory up to the root or break_at\n \"\"\"\n if not os.path.exists(path): # pragma: no cover\n raise IOError(\"Starting path not found\")\n\n if os.path.isfile(path): # pragma: no cover\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n paths = []\n while last_dir != current_dir:\n paths.append(current_dir)\n paths.append(os.path.join(current_dir, \"config\"))\n if break_at and current_dir == os.path.abspath(break_at): # noqa\n break\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n return paths\n\n\nSEARCHTREE = []\n\n\ndef find_file(filename=\".env\", project_root=None, skip_files=None, **kwargs):\n \"\"\"Search in increasingly higher folders for the given file\n Returns path to the file if found, or an empty string otherwise.\n\n This function will build a `search_tree` based on:\n\n - Project_root if specified\n - Invoked script location and its parents until root\n - Current working directory\n\n For each path in the `search_tree` it will also look for an\n aditional `./config` folder.\n \"\"\"\n search_tree = []\n work_dir = os.getcwd()\n skip_files = skip_files or []\n\n # If filename is an absolute path and exists, just return it\n # if the absolute path does not exist, return empty string so\n # that it can be joined and avoid IoError\n if os.path.isabs(filename):\n return filename if os.path.exists(filename) else \"\"\n\n if project_root is not None:\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n\n script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))\n\n # Path to invoked script and recursively to root with its ./config dirs\n search_tree.extend(_walk_to_root(script_dir))\n\n # Path to where Python interpreter was invoked and recursively to root\n search_tree.extend(_walk_to_root(work_dir))\n\n # Don't look the same place twice\n search_tree = deduplicate(search_tree)\n\n global SEARCHTREE\n SEARCHTREE[:] = search_tree\n\n for dirname in search_tree:\n check_path = os.path.join(dirname, filename)\n if check_path in skip_files:\n continue\n if os.path.exists(check_path):\n return check_path # First found will return\n\n # return empty string if not found so it can still be joined in os.path\n return \"\"\n\n\ndef read_file(path, **kwargs):\n content = \"\"\n with io.open(path, **kwargs) as open_file:\n content = open_file.read().strip()\n return content\n\n\ndef get_local_filename(filename):\n \"\"\"Takes a filename like `settings.toml` 
and returns `settings.local.toml`\n\n Arguments:\n filename {str} -- The filename or complete path\n\n Returns:\n [str] -- The same name or path with `.local.` added.\n \"\"\"\n name, _, extension = os.path.basename(str(filename)).rpartition(\n os.path.extsep\n )\n\n return os.path.join(\n os.path.dirname(str(filename)), f\"{name}.local.{extension}\"\n )\n", "path": "dynaconf/utils/files.py"}]}
| 2,373 | 157 |
gh_patches_debug_25247
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-746
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add help text with deep links and `[[snippet_id]]` in ASRSnippet

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/admin/adminmodels.py`
Content:
```
1 import re
2
3 from django.contrib import admin
4 from django.db.models import TextField, Q
5 from django.template.loader import get_template
6 from django.utils.safestring import mark_safe
7
8 from reversion.admin import VersionAdmin
9 from django_ace import AceWidget
10 from django_statsd.clients import statsd
11 from jinja2.meta import find_undeclared_variables
12 from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter
13
14 from snippets.base import forms, models
15 from snippets.base.models import JINJA_ENV
16 from snippets.base.admin.filters import ModifiedFilter, ReleaseFilter
17
18
19 MATCH_LOCALE_REGEX = re.compile('(\w+(?:-\w+)*)')
20 RESERVED_VARIABLES = ('_', 'snippet_id')
21
22
23 class ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):
24 list_display = ('description', 'is_exclusion', 'startpage_version', 'name',
25 'version', 'locale', 'appbuildid', 'build_target',
26 'channel', 'os_version', 'distribution',
27 'distribution_version', 'modified')
28 list_filter = ('name', 'version', 'os_version', 'appbuildid',
29 'build_target', 'channel', 'distribution', 'locale')
30 save_on_top = True
31 search_fields = ('description',)
32
33
34 class LogEntryAdmin(admin.ModelAdmin):
35 list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')
36 list_filter = ('user', 'content_type')
37
38
39 class SnippetTemplateVariableInline(admin.TabularInline):
40 model = models.SnippetTemplateVariable
41 formset = forms.SnippetTemplateVariableInlineFormset
42 max_num = 0
43 can_delete = False
44 readonly_fields = ('name',)
45 fields = ('name', 'type', 'order', 'description')
46
47
48 class SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):
49 save_on_top = True
50 list_display = ('name', 'priority', 'hidden')
51 list_filter = ('hidden', 'startpage')
52 inlines = (SnippetTemplateVariableInline,)
53 formfield_overrides = {
54 TextField: {'widget': AceWidget(mode='html', theme='github',
55 width='1200px', height='500px')},
56 }
57
58 class Media:
59 css = {
60 'all': ('css/admin.css',)
61 }
62
63 def save_related(self, request, form, formsets, change):
64 """
65 After saving the related objects, remove and add
66 SnippetTemplateVariables depending on how the template code changed.
67 """
68 super(SnippetTemplateAdmin, self).save_related(request, form, formsets,
69 change)
70
71 # Parse the template code and find any undefined variables.
72 ast = JINJA_ENV.env.parse(form.instance.code)
73 new_vars = find_undeclared_variables(ast)
74 var_manager = form.instance.variable_set
75
76 # Filter out reserved variable names.
77 new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]
78
79 # Delete variables not in the new set.
80 var_manager.filter(~Q(name__in=new_vars)).delete()
81
82 # Create variables that don't exist.
83 for i, variable in enumerate(new_vars, start=1):
84 obj, _ = models.SnippetTemplateVariable.objects.get_or_create(
85 template=form.instance, name=variable)
86 if obj.order == 0:
87 obj.order = i * 10
88 obj.save()
89
90
91 class UploadedFileAdmin(admin.ModelAdmin):
92 readonly_fields = ('url', 'preview', 'snippets')
93 list_display = ('name', 'url', 'preview', 'modified')
94 prepopulated_fields = {'name': ('file',)}
95 form = forms.UploadedFileAdminForm
96
97 def preview(self, obj):
98 template = get_template('base/uploadedfile_preview.jinja')
99 return mark_safe(template.render({'file': obj}))
100
101 def snippets(self, obj):
102 """Snippets using this file."""
103 template = get_template('base/uploadedfile_snippets.jinja')
104 return mark_safe(template.render({'snippets': obj.snippets}))
105
106
107 class AddonAdmin(admin.ModelAdmin):
108 list_display = ('name', 'guid')
109
110
111 class ASRSnippetAdmin(admin.ModelAdmin):
112 form = forms.ASRSnippetAdminForm
113
114 list_display_links = (
115 'id',
116 'name',
117 )
118 list_display = (
119 'id',
120 'name',
121 'status',
122 'modified',
123 )
124 list_filter = (
125 ModifiedFilter,
126 'status',
127 ReleaseFilter,
128 ('template', RelatedDropdownFilter),
129 )
130 search_fields = (
131 'name',
132 )
133 autocomplete_fields = (
134 'campaign',
135 )
136 preserve_filters = True
137 readonly_fields = (
138 'created',
139 'modified',
140 'uuid',
141 'creator',
142 'preview_url',
143 )
144 filter_horizontal = ('locales',)
145 save_on_top = True
146 save_as = True
147 view_on_site = False
148
149 fieldsets = (
150 ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),
151 ('Content', {
152 'fields': ('template', 'data'),
153 }),
154 ('Publishing Options', {
155 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)
156 }),
157 ('Other Info', {
158 'fields': ('uuid', ('created', 'modified')),
159 'classes': ('collapse',)
160 }),
161 )
162
163 def save_model(self, request, obj, form, change):
164 obj.creator = request.user
165 statsd.incr('save.asrsnippet')
166 super().save_model(request, obj, form, change)
167
168 def preview_url(self, obj):
169 return obj.get_preview_url()
170
171
172 class CampaignAdmin(admin.ModelAdmin):
173 readonly_fields = ('created', 'modified', 'creator',)
174 prepopulated_fields = {'slug': ('name',)}
175
176 fieldsets = (
177 ('ID', {'fields': ('name', 'slug')}),
178 ('Other Info', {
179 'fields': ('creator', ('created', 'modified')),
180 }),
181 )
182 search_fields = (
183 'name',
184 )
185
186 def save_model(self, request, obj, form, change):
187 obj.creator = request.user
188 statsd.incr('save.campaign')
189 super().save_model(request, obj, form, change)
190
191
192 class TargetAdmin(admin.ModelAdmin):
193 form = forms.TargetAdminForm
194 readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')
195
196 fieldsets = (
197 ('ID', {'fields': ('name',)}),
198 ('Product channels', {
199 'description': 'What channels will this snippet be available in?',
200 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)
201 }),
202 ('Targeting', {
203 'fields': (
204 'filtr_is_default_browser',
205 'filtr_updates_enabled',
206 'filtr_updates_autodownload_enabled',
207 'filtr_profile_age_created',
208 'filtr_firefox_version',
209 'filtr_previous_session_end',
210 'filtr_uses_firefox_sync',
211 'filtr_is_developer',
212 'filtr_current_search_engine',
213 'filtr_browser_addon',
214 'filtr_total_bookmarks_count',
215 )
216 }),
217 ('Other Info', {
218 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),
219 }),
220 )
221
222 def save_model(self, request, obj, form, change):
223 obj.creator = request.user
224 statsd.incr('save.target')
225 super().save_model(request, obj, form, change)
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py
--- a/snippets/base/admin/adminmodels.py
+++ b/snippets/base/admin/adminmodels.py
@@ -149,6 +149,21 @@
fieldsets = (
('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),
('Content', {
+ 'description': (
+ '''
+ <strong>Available deep links:</strong><br/>
+ <ol>
+ <li><code>special:accounts</code> to open Firefox Accounts</li>
+ <li><code>special:appMenu</code> to open the hamburger menu</li>
+ </ol><br/>
+ <strong>Automatically add Snippet ID:</strong><br/>
+ You can use <code>[[snippet_id]]</code> in any field and it
+ will be automatically replaced by Snippet ID when served to users.
+ <br/>
+ Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa
+ <br/>
+ '''
+ ),
'fields': ('template', 'data'),
}),
('Publishing Options', {
@@ -160,6 +175,11 @@
}),
)
+ class Media:
+ css = {
+ 'all': ('css/admin/ASRSnippetAdmin.css',)
+ }
+
def save_model(self, request, obj, form, change):
obj.creator = request.user
statsd.incr('save.asrsnippet')
|
{"golden_diff": "diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\n--- a/snippets/base/admin/adminmodels.py\n+++ b/snippets/base/admin/adminmodels.py\n@@ -149,6 +149,21 @@\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n+ 'description': (\n+ '''\n+ <strong>Available deep links:</strong><br/>\n+ <ol>\n+ <li><code>special:accounts</code> to open Firefox Accounts</li>\n+ <li><code>special:appMenu</code> to open the hamburger menu</li>\n+ </ol><br/>\n+ <strong>Automatically add Snippet ID:</strong><br/>\n+ You can use <code>[[snippet_id]]</code> in any field and it\n+ will be automatically replaced by Snippet ID when served to users.\n+ <br/>\n+ Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa\n+ <br/>\n+ '''\n+ ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n@@ -160,6 +175,11 @@\n }),\n )\n \n+ class Media:\n+ css = {\n+ 'all': ('css/admin/ASRSnippetAdmin.css',)\n+ }\n+\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n", "issue": "Add help text with deep links and `[[snippet_id]]` in ASRSnippet\n\r\n\n", "before_files": [{"content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n\n\nMATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n 
change)\n\n # Parse the template code and find any undefined variables.\n ast = JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n ModifiedFilter,\n 'status',\n ReleaseFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n )\n autocomplete_fields = (\n 'campaign',\n )\n preserve_filters = True\n readonly_fields = (\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n )\n filter_horizontal = ('locales',)\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n return obj.get_preview_url()\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 
'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py"}], "after_files": [{"content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n\n\nMATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 
'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n ModifiedFilter,\n 'status',\n ReleaseFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n )\n autocomplete_fields = (\n 'campaign',\n )\n preserve_filters = True\n readonly_fields = (\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n )\n filter_horizontal = ('locales',)\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n return obj.get_preview_url()\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 
'filtr_total_bookmarks_count',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py"}]}
| 2,550 | 376 |
gh_patches_debug_64324
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-630
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 1.6.0
On the docket:
+ (longterm fix) unhandled AttributeError during pex bootstrapping with PEX_PATH #598
+ Vendor setuptools / wheel. #607
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.5.3'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.5.3'
+__version__ = '1.6.0'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.5.3'\n+__version__ = '1.6.0'\n", "issue": "Release 1.6.0\nOn the docket:\r\n+ (longterm fix) unhandled AttributeError during pex bootstrapping with PEX_PATH #598\r\n+ Vendor setuptools / wheel. #607\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.5.3'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.0'\n", "path": "pex/version.py"}]}
| 354 | 94 |