Dataset columns:

- problem_id: string, 18-22 chars
- source: string, 1 distinct value
- task_type: string, 1 distinct value
- in_source_id: string, 13-58 chars
- prompt: string, 1.1k-10.2k chars
- golden_diff: string, 151-4.94k chars
- verification_info: string, 582-21k chars
- num_tokens: int64, 271-2.05k
- num_tokens_diff: int64, 47-1.02k
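The records below are a flattened preview of this table. As a minimal sketch of how the columns fit together, the following loads the dataset with the Hugging Face `datasets` library and unpacks one record; the repository id and split name are assumptions and may not match where the data is actually hosted.

```python
import json

from datasets import load_dataset

# Assumed repo id and split name; adjust to wherever this dataset is published.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])                          # e.g. "gh_patches_debug_715"
print(row["in_source_id"])                        # upstream issue, e.g. "xorbitsai__inference-1092"
print(row["num_tokens"], row["num_tokens_diff"])  # prompt and diff token counts

# verification_info is a JSON string bundling the issue text, the golden diff,
# and the before/after file contents.
info = json.loads(row["verification_info"])
print(info["issue"][:80])
for f in info["before_files"]:
    print(f["path"], "->", len(f["content"]), "chars before the patch")
```

The `verification_info` field appears to hold everything a checker would need to apply a candidate patch to the `before_files` and compare the result against `after_files` and the `golden_diff`.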
problem_id: gh_patches_debug_715
source: rasdani/github-patches
task_type: git_diff
in_source_id: xorbitsai__inference-1092

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: Add doc about installing from source code
Note that the issue tracker is NOT the place for general support.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 # import sys
15 # sys.path.insert(0, os.path.abspath('.'))
16
17
18 # -- Project information -----------------------------------------------------
19
20 project = 'Xinference'
21 copyright = '2023, Xorbits Inc.'
22 author = 'xorbitsai'
23
24
25 # -- General configuration ---------------------------------------------------
26
27 # Add any Sphinx extension module names here, as strings. They can be
28 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
29 # ones.
30 extensions = [
31 "sphinx.ext.mathjax",
32 "sphinx.ext.ifconfig",
33 "sphinx.ext.intersphinx",
34 "sphinx.ext.viewcode",
35 "sphinx.ext.githubpages",
36 "sphinx.ext.autosummary",
37 "sphinx.ext.napoleon",
38 "sphinx_tabs.tabs",
39 "sphinx_design",
40 "IPython.sphinxext.ipython_directive",
41 "IPython.sphinxext.ipython_console_highlighting",
42 ]
43
44 # Add any paths that contain templates here, relative to this directory.
45 templates_path = ['_templates']
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = []
51
52 # i18n
53 locale_dirs = ["locale/"] # path is example but recommended.
54 gettext_compact = False # optional
55
56
57 # -- Options for HTML output -------------------------------------------------
58
59 # The theme to use for HTML and HTML Help pages. See the documentation for
60 # a list of builtin themes.
61 #
62 html_theme = 'pydata_sphinx_theme'
63 html_title = "Xinference"
64
65 # Add any paths that contain custom static files (such as style sheets) here,
66 # relative to this directory. They are copied after the builtin static files,
67 # so a file named "default.css" will overwrite the builtin "default.css".
68 html_static_path = ['_static']
69
70 # Define the json_url for our version switcher.
71 version_match = os.environ.get("READTHEDOCS_LANGUAGE")
72 json_url = "https://inference.readthedocs.io/en/latest/_static/switcher.json"
73 if not version_match:
74 version_match = 'en'
75
76 html_theme_options = {
77 "show_toc_level": 2,
78 "header_links_before_dropdown": 6,
79 "icon_links": [
80 {
81 "name": "GitHub",
82 "url": "https://github.com/xorbitsai/inference",
83 "icon": "fa-brands fa-github",
84 "type": "fontawesome",
85 },
86 ],
87 "navbar_align": "content", # [left, content, right] For testing that the navbar items align properly
88 "navbar_start": ["navbar-logo", "version-switcher"],
89 "navbar_center": ["navbar-nav"],
90 "switcher": {
91 "json_url": json_url,
92 "version_match": version_match,
93 },
94 }
95
96
97 if version_match != 'zh-cn':
98 html_theme_options['icon_links'].extend([{
99 "name": "Slack",
100 "url": "https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg",
101 "icon": "fa-brands fa-slack",
102 "type": "fontawesome",
103 },
104 {
105 "name": "Twitter",
106 "url": "https://twitter.com/xorbitsio",
107 "icon": "fa-brands fa-twitter",
108 "type": "fontawesome",
109 }])
110 else:
111 html_theme_options['icon_links'].extend([{
112 "name": "WeChat",
113 "url": "https://xorbits.cn/assets/images/wechat_work_qr.png",
114 "icon": "fa-brands fa-weixin",
115 "type": "fontawesome",
116 },
117 {
118 "name": "Zhihu",
119 "url": "https://zhihu.com/org/xorbits",
120 "icon": "fa-brands fa-zhihu",
121 "type": "fontawesome",
122 }])
123 html_theme_options["external_links"] = [
124 {"name": "产品官网", "url": "https://xorbits.cn/inference"},
125 ]
126
127 html_favicon = "_static/favicon.svg"
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -75,7 +75,7 @@
html_theme_options = {
"show_toc_level": 2,
- "header_links_before_dropdown": 6,
+ "header_links_before_dropdown": 7,
"icon_links": [
{
"name": "GitHub",

verification_info:
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -75,7 +75,7 @@\n \n html_theme_options = {\n \"show_toc_level\": 2,\n- \"header_links_before_dropdown\": 6,\n+ \"header_links_before_dropdown\": 7,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n", "issue": "DOC: Add doc about installing from source code\nNote that the issue tracker is NOT the place for general support.\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Xinference'\ncopyright = '2023, Xorbits Inc.'\nauthor = 'xorbitsai'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx_tabs.tabs\",\n \"sphinx_design\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# i18n\nlocale_dirs = [\"locale/\"] # path is example but recommended.\ngettext_compact = False # optional\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_title = \"Xinference\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Define the json_url for our version switcher.\nversion_match = os.environ.get(\"READTHEDOCS_LANGUAGE\")\njson_url = \"https://inference.readthedocs.io/en/latest/_static/switcher.json\"\nif not version_match:\n version_match = 'en'\n\nhtml_theme_options = {\n \"show_toc_level\": 2,\n \"header_links_before_dropdown\": 6,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/xorbitsai/inference\",\n \"icon\": \"fa-brands fa-github\",\n \"type\": \"fontawesome\",\n },\n ],\n \"navbar_align\": \"content\", # [left, content, right] For testing that the navbar items align properly\n \"navbar_start\": [\"navbar-logo\", \"version-switcher\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"switcher\": {\n \"json_url\": json_url,\n \"version_match\": version_match,\n },\n}\n\n\nif version_match != 'zh-cn':\n html_theme_options['icon_links'].extend([{\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg\",\n \"icon\": \"fa-brands fa-slack\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/xorbitsio\",\n \"icon\": \"fa-brands fa-twitter\",\n \"type\": \"fontawesome\",\n }])\nelse:\n html_theme_options['icon_links'].extend([{\n \"name\": \"WeChat\",\n \"url\": \"https://xorbits.cn/assets/images/wechat_work_qr.png\",\n \"icon\": \"fa-brands fa-weixin\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Zhihu\",\n \"url\": \"https://zhihu.com/org/xorbits\",\n \"icon\": \"fa-brands fa-zhihu\",\n \"type\": \"fontawesome\",\n }])\n html_theme_options[\"external_links\"] = [\n {\"name\": \"\u4ea7\u54c1\u5b98\u7f51\", \"url\": \"https://xorbits.cn/inference\"},\n ]\n\nhtml_favicon = \"_static/favicon.svg\"\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Xinference'\ncopyright = '2023, Xorbits Inc.'\nauthor = 'xorbitsai'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx_tabs.tabs\",\n \"sphinx_design\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# i18n\nlocale_dirs = [\"locale/\"] # path is example but recommended.\ngettext_compact = False # optional\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_title = \"Xinference\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Define the json_url for our version switcher.\nversion_match = os.environ.get(\"READTHEDOCS_LANGUAGE\")\njson_url = \"https://inference.readthedocs.io/en/latest/_static/switcher.json\"\nif not version_match:\n version_match = 'en'\n\nhtml_theme_options = {\n \"show_toc_level\": 2,\n \"header_links_before_dropdown\": 7,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/xorbitsai/inference\",\n \"icon\": \"fa-brands fa-github\",\n \"type\": \"fontawesome\",\n },\n ],\n \"navbar_align\": \"content\", # [left, content, right] For testing that the navbar items align properly\n \"navbar_start\": [\"navbar-logo\", \"version-switcher\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"switcher\": {\n \"json_url\": json_url,\n \"version_match\": version_match,\n },\n}\n\n\nif version_match != 'zh-cn':\n html_theme_options['icon_links'].extend([{\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg\",\n \"icon\": \"fa-brands fa-slack\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/xorbitsio\",\n \"icon\": \"fa-brands fa-twitter\",\n \"type\": \"fontawesome\",\n }])\nelse:\n html_theme_options['icon_links'].extend([{\n \"name\": \"WeChat\",\n \"url\": \"https://xorbits.cn/assets/images/wechat_work_qr.png\",\n \"icon\": \"fa-brands fa-weixin\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Zhihu\",\n \"url\": \"https://zhihu.com/org/xorbits\",\n \"icon\": \"fa-brands fa-zhihu\",\n \"type\": \"fontawesome\",\n }])\n html_theme_options[\"external_links\"] = [\n {\"name\": \"\u4ea7\u54c1\u5b98\u7f51\", \"url\": \"https://xorbits.cn/inference\"},\n ]\n\nhtml_favicon = \"_static/favicon.svg\"\n", "path": "doc/source/conf.py"}]}

num_tokens: 1,587
num_tokens_diff: 93

problem_id: gh_patches_debug_14687
source: rasdani/github-patches
task_type: git_diff
in_source_id: ultrabug__py3status-239

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Module "xrandr_rotate" makes i3 unusable
We have seen a case (see #217) when i3 becomes unusable after rotating screen using `xrandr_rotate` module. The symptoms are: `xrandr` process is stalled and only sending SIGCONT to it makes i3 usable again.
This issue is not reproducible for everyone. If you experience a similar behavior, please post here as many details as you can to help us investigate and fix this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/xrandr_rotate.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 Switch between horizontal and vertical screen rotation on a single click.
5
6 Configuration parameters:
7 cache_timeout: how often to refresh this module.
8 (default is 10)
9 format: a string that formats the output, can include placeholders.
10 (default is '{icon}')
11 hide_if_disconnected: a boolean flag to hide icon when `screen` is disconnected.
12 it has no effect unless `screen` option is also configured.
13 (default: None)
14 horizontal_icon: a character to represent horizontal rotation.
15 (default is 'H')
16 horizontal_rotation: a horizontal rotation for xrandr to use.
17 available options: 'normal' or 'inverted'.
18 (default is 'normal')
19 screen: display output name to rotate, as detected by xrandr.
20 if not provided, all enabled screens will be rotated.
21 (default: None)
22 vertical_icon: a character to represent vertical rotation.
23 (default is 'V')
24 vertical_rotation: a vertical rotation for xrandr to use.
25 available options: 'left' or 'right'.
26 (default is 'left')
27
28 Available placeholders for formatting the output:
29 {icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.
30 {screen} a screen name, specified by `screen` option or detected automatically
31 if only one screen is connected, otherwise 'ALL'.
32
33
34 Remarks:
35 There have been cases when rotating a screen using this module made i3 unusabe.
36 If you experience a similar behavior, please report as many details as you can:
37 https://github.com/ultrabug/py3status/issues/227
38
39
40 @author Maxim Baz (https://github.com/maximbaz)
41 @license BSD
42 """
43
44 from subprocess import Popen, PIPE
45 from time import sleep, time
46
47
48 class Py3status:
49 """
50 """
51 # available configuration parameters
52 cache_timeout = 10
53 format = '{icon}'
54 hide_if_disconnected = False
55 horizontal_icon = 'H'
56 horizontal_rotation = 'normal'
57 screen = None
58 vertical_icon = 'V'
59 vertical_rotation = 'left'
60
61 def _call(self, cmd):
62 process = Popen(cmd, stdout=PIPE, shell=True)
63 output = process.communicate()[0] or ""
64 try:
65 # python3
66 output = output.decode()
67 except:
68 pass
69 return output.strip()
70
71 def _get_all_outputs(self):
72 cmd = 'xrandr -q | grep " connected [^(]" | cut -d " " -f1'
73 return self._call(cmd).split()
74
75 def _get_current_rotation_icon(self, all_outputs):
76 output = self.screen or all_outputs[0]
77 cmd = 'xrandr -q | grep "^' + output + '" | cut -d " " -f4'
78 output = self._call(cmd)
79 # xrandr may skip printing the 'normal', in which case the output would start from '('
80 is_horizontal = output.startswith('(') or output in ['normal', 'inverted']
81 return self.horizontal_icon if is_horizontal else self.vertical_icon
82
83 def _apply(self):
84 rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation
85 outputs = [self.screen] if self.screen else self._get_all_outputs()
86 for output in outputs:
87 cmd = 'xrandr --output ' + output + ' --rotate ' + rotation
88 self._call(cmd)
89
90 def _switch_selection(self):
91 self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon
92
93 def on_click(self, i3s_output_list, i3s_config, event):
94 """
95 Click events
96 - left click & scroll up/down: switch between rotations
97 - right click: apply selected rotation
98 """
99 button = event['button']
100 if button in [1, 4, 5]:
101 self._switch_selection()
102 elif button == 3:
103 self._apply()
104
105 def xrandr_rotate(self, i3s_output_list, i3s_config):
106 all_outputs = self._get_all_outputs()
107 selected_screen_disconnected = self.screen is not None and self.screen not in all_outputs
108 if selected_screen_disconnected and self.hide_if_disconnected:
109 full_text = ''
110 else:
111 if not hasattr(self, 'displayed'):
112 self.displayed = self._get_current_rotation_icon(all_outputs)
113
114 screen = self.screen or all_outputs[0] if len(all_outputs) == 1 else 'ALL'
115 full_text = self.format.format(icon=self.displayed or '?', screen=screen)
116
117 response = {
118 'cached_until': time() + self.cache_timeout,
119 'full_text': full_text
120 }
121
122 # coloration
123 if selected_screen_disconnected and not self.hide_if_disconnected:
124 response['color'] = i3s_config['color_degraded']
125 elif self.displayed == self._get_current_rotation_icon(all_outputs):
126 response['color'] = i3s_config['color_good']
127
128 return response
129
130
131 if __name__ == "__main__":
132 """
133 Test this module by calling it directly.
134 """
135 x = Py3status()
136 config = {
137 'color_bad': '#FF0000',
138 'color_degraded': '#FFFF00',
139 'color_good': '#00FF00'
140 }
141 while True:
142 print(x.xrandr_rotate([], config))
143 sleep(1)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:
diff --git a/py3status/modules/xrandr_rotate.py b/py3status/modules/xrandr_rotate.py
--- a/py3status/modules/xrandr_rotate.py
+++ b/py3status/modules/xrandr_rotate.py
@@ -84,8 +84,8 @@
rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation
outputs = [self.screen] if self.screen else self._get_all_outputs()
for output in outputs:
- cmd = 'xrandr --output ' + output + ' --rotate ' + rotation
- self._call(cmd)
+ cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation
+ Popen(['i3-msg', cmd], stdout=PIPE)
def _switch_selection(self):
self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon

verification_info:
{"golden_diff": "diff --git a/py3status/modules/xrandr_rotate.py b/py3status/modules/xrandr_rotate.py\n--- a/py3status/modules/xrandr_rotate.py\n+++ b/py3status/modules/xrandr_rotate.py\n@@ -84,8 +84,8 @@\n rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation\n outputs = [self.screen] if self.screen else self._get_all_outputs()\n for output in outputs:\n- cmd = 'xrandr --output ' + output + ' --rotate ' + rotation\n- self._call(cmd)\n+ cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation\n+ Popen(['i3-msg', cmd], stdout=PIPE)\n \n def _switch_selection(self):\n self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon\n", "issue": "Module \"xrandr_rotate\" makes i3 unusable\nWe have seen a case (see #217) when i3 becomes unusable after rotating screen using `xrandr_rotate` module. The symptoms are: `xrandr` process is stalled and only sending SIGCONT to it makes i3 usable again.\n\nThis issue is not reproducible for everyone. If you experience a similar behavior, please post here as many details as you can to help us investigate and fix this issue.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSwitch between horizontal and vertical screen rotation on a single click.\n\nConfiguration parameters:\n cache_timeout: how often to refresh this module.\n (default is 10)\n format: a string that formats the output, can include placeholders.\n (default is '{icon}')\n hide_if_disconnected: a boolean flag to hide icon when `screen` is disconnected.\n it has no effect unless `screen` option is also configured.\n (default: None)\n horizontal_icon: a character to represent horizontal rotation.\n (default is 'H')\n horizontal_rotation: a horizontal rotation for xrandr to use.\n available options: 'normal' or 'inverted'.\n (default is 'normal')\n screen: display output name to rotate, as detected by xrandr.\n if not provided, all enabled screens will be rotated.\n (default: None)\n vertical_icon: a character to represent vertical rotation.\n (default is 'V')\n vertical_rotation: a vertical rotation for xrandr to use.\n available options: 'left' or 'right'.\n (default is 'left')\n\nAvailable placeholders for formatting the output:\n {icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.\n {screen} a screen name, specified by `screen` option or detected automatically\n if only one screen is connected, otherwise 'ALL'.\n\n\nRemarks:\n There have been cases when rotating a screen using this module made i3 unusabe.\n If you experience a similar behavior, please report as many details as you can:\n https://github.com/ultrabug/py3status/issues/227\n\n\n@author Maxim Baz (https://github.com/maximbaz)\n@license BSD\n\"\"\"\n\nfrom subprocess import Popen, PIPE\nfrom time import sleep, time\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n format = '{icon}'\n hide_if_disconnected = False\n horizontal_icon = 'H'\n horizontal_rotation = 'normal'\n screen = None\n vertical_icon = 'V'\n vertical_rotation = 'left'\n\n def _call(self, cmd):\n process = Popen(cmd, stdout=PIPE, shell=True)\n output = process.communicate()[0] or \"\"\n try:\n # python3\n output = output.decode()\n except:\n pass\n return output.strip()\n\n def _get_all_outputs(self):\n cmd = 'xrandr -q | grep \" connected [^(]\" | cut -d \" \" -f1'\n return self._call(cmd).split()\n\n def _get_current_rotation_icon(self, all_outputs):\n output = 
self.screen or all_outputs[0]\n cmd = 'xrandr -q | grep \"^' + output + '\" | cut -d \" \" -f4'\n output = self._call(cmd)\n # xrandr may skip printing the 'normal', in which case the output would start from '('\n is_horizontal = output.startswith('(') or output in ['normal', 'inverted']\n return self.horizontal_icon if is_horizontal else self.vertical_icon\n\n def _apply(self):\n rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation\n outputs = [self.screen] if self.screen else self._get_all_outputs()\n for output in outputs:\n cmd = 'xrandr --output ' + output + ' --rotate ' + rotation\n self._call(cmd)\n\n def _switch_selection(self):\n self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon\n\n def on_click(self, i3s_output_list, i3s_config, event):\n \"\"\"\n Click events\n - left click & scroll up/down: switch between rotations\n - right click: apply selected rotation\n \"\"\"\n button = event['button']\n if button in [1, 4, 5]:\n self._switch_selection()\n elif button == 3:\n self._apply()\n\n def xrandr_rotate(self, i3s_output_list, i3s_config):\n all_outputs = self._get_all_outputs()\n selected_screen_disconnected = self.screen is not None and self.screen not in all_outputs\n if selected_screen_disconnected and self.hide_if_disconnected:\n full_text = ''\n else:\n if not hasattr(self, 'displayed'):\n self.displayed = self._get_current_rotation_icon(all_outputs)\n\n screen = self.screen or all_outputs[0] if len(all_outputs) == 1 else 'ALL'\n full_text = self.format.format(icon=self.displayed or '?', screen=screen)\n\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': full_text\n }\n\n # coloration\n if selected_screen_disconnected and not self.hide_if_disconnected:\n response['color'] = i3s_config['color_degraded']\n elif self.displayed == self._get_current_rotation_icon(all_outputs):\n response['color'] = i3s_config['color_good']\n\n return response\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n x = Py3status()\n config = {\n 'color_bad': '#FF0000',\n 'color_degraded': '#FFFF00',\n 'color_good': '#00FF00'\n }\n while True:\n print(x.xrandr_rotate([], config))\n sleep(1)\n", "path": "py3status/modules/xrandr_rotate.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSwitch between horizontal and vertical screen rotation on a single click.\n\nConfiguration parameters:\n cache_timeout: how often to refresh this module.\n (default is 10)\n format: a string that formats the output, can include placeholders.\n (default is '{icon}')\n hide_if_disconnected: a boolean flag to hide icon when `screen` is disconnected.\n it has no effect unless `screen` option is also configured.\n (default: None)\n horizontal_icon: a character to represent horizontal rotation.\n (default is 'H')\n horizontal_rotation: a horizontal rotation for xrandr to use.\n available options: 'normal' or 'inverted'.\n (default is 'normal')\n screen: display output name to rotate, as detected by xrandr.\n if not provided, all enabled screens will be rotated.\n (default: None)\n vertical_icon: a character to represent vertical rotation.\n (default is 'V')\n vertical_rotation: a vertical rotation for xrandr to use.\n available options: 'left' or 'right'.\n (default is 'left')\n\nAvailable placeholders for formatting the output:\n {icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.\n {screen} a screen name, 
specified by `screen` option or detected automatically\n if only one screen is connected, otherwise 'ALL'.\n\n\nRemarks:\n There have been cases when rotating a screen using this module made i3 unusabe.\n If you experience a similar behavior, please report as many details as you can:\n https://github.com/ultrabug/py3status/issues/227\n\n\n@author Maxim Baz (https://github.com/maximbaz)\n@license BSD\n\"\"\"\n\nfrom subprocess import Popen, PIPE\nfrom time import sleep, time\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n format = '{icon}'\n hide_if_disconnected = False\n horizontal_icon = 'H'\n horizontal_rotation = 'normal'\n screen = None\n vertical_icon = 'V'\n vertical_rotation = 'left'\n\n def _call(self, cmd):\n process = Popen(cmd, stdout=PIPE, shell=True)\n output = process.communicate()[0] or \"\"\n try:\n # python3\n output = output.decode()\n except:\n pass\n return output.strip()\n\n def _get_all_outputs(self):\n cmd = 'xrandr -q | grep \" connected [^(]\" | cut -d \" \" -f1'\n return self._call(cmd).split()\n\n def _get_current_rotation_icon(self, all_outputs):\n output = self.screen or all_outputs[0]\n cmd = 'xrandr -q | grep \"^' + output + '\" | cut -d \" \" -f4'\n output = self._call(cmd)\n # xrandr may skip printing the 'normal', in which case the output would start from '('\n is_horizontal = output.startswith('(') or output in ['normal', 'inverted']\n return self.horizontal_icon if is_horizontal else self.vertical_icon\n\n def _apply(self):\n rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation\n outputs = [self.screen] if self.screen else self._get_all_outputs()\n for output in outputs:\n cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation\n Popen(['i3-msg', cmd], stdout=PIPE)\n\n def _switch_selection(self):\n self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon\n\n def on_click(self, i3s_output_list, i3s_config, event):\n \"\"\"\n Click events\n - left click & scroll up/down: switch between rotations\n - right click: apply selected rotation\n \"\"\"\n button = event['button']\n if button in [1, 4, 5]:\n self._switch_selection()\n elif button == 3:\n self._apply()\n\n def xrandr_rotate(self, i3s_output_list, i3s_config):\n all_outputs = self._get_all_outputs()\n selected_screen_disconnected = self.screen is not None and self.screen not in all_outputs\n if selected_screen_disconnected and self.hide_if_disconnected:\n full_text = ''\n else:\n if not hasattr(self, 'displayed'):\n self.displayed = self._get_current_rotation_icon(all_outputs)\n\n screen = self.screen or all_outputs[0] if len(all_outputs) == 1 else 'ALL'\n full_text = self.format.format(icon=self.displayed or '?', screen=screen)\n\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': full_text\n }\n\n # coloration\n if selected_screen_disconnected and not self.hide_if_disconnected:\n response['color'] = i3s_config['color_degraded']\n elif self.displayed == self._get_current_rotation_icon(all_outputs):\n response['color'] = i3s_config['color_good']\n\n return response\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n x = Py3status()\n config = {\n 'color_bad': '#FF0000',\n 'color_degraded': '#FFFF00',\n 'color_good': '#00FF00'\n }\n while True:\n print(x.xrandr_rotate([], config))\n sleep(1)\n", "path": "py3status/modules/xrandr_rotate.py"}]}

num_tokens: 1,881
num_tokens_diff: 196

problem_id: gh_patches_debug_17951
source: rasdani/github-patches
task_type: git_diff
in_source_id: wemake-services__wemake-python-styleguide-243

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forbid to compare the identical variables
# Rule request
<!-- Thanks for coming up with a proposal! -->
## Thesis
We need to find cases like this: `if some_value == some_value:`.
We also need to find other operators: `is`, `is not`, `>`, `<`, `!=`, `>=`, `<=`.
## Reasoning
It clearly does not make any sense. It is a bug in the source code.
Related: https://help.semmle.com/wiki/display/PYTHON/Comparison+of+identical+values
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/comparisons.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import ClassVar, Sequence
5
6 from wemake_python_styleguide.types import AnyNodes
7 from wemake_python_styleguide.violations.consistency import (
8 ComparisonOrderViolation,
9 ConstantComparisonViolation,
10 MultipleInComparisonViolation,
11 )
12 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
13
14
15 class ConstantComparisonVisitor(BaseNodeVisitor):
16 """Restricts the comparison of literals."""
17
18 def _check_is_literal(self, node: ast.AST) -> bool:
19 """
20 Checks for nodes that contains only constants.
21
22 If the node contains only literals it will be evaluted.
23 When node relies on some other names, it won't be evaluted.
24 """
25 try:
26 ast.literal_eval(node)
27 except ValueError:
28 return False
29 else:
30 return True
31
32 def _check_literal_compare(self, node: ast.Compare) -> None:
33 last_was_literal = self._check_is_literal(node.left)
34 for comparator in node.comparators:
35 next_is_literal = self._check_is_literal(comparator)
36 if last_was_literal and next_is_literal:
37 self.add_violation(ConstantComparisonViolation(node))
38 break
39 last_was_literal = next_is_literal
40
41 def visit_Compare(self, node: ast.Compare) -> None:
42 """
43 Ensures that compares are written correctly.
44
45 Raises:
46 ConstantComparisonViolation
47
48 """
49 self._check_literal_compare(node)
50 self.generic_visit(node)
51
52
53 class WrongOrderVisitor(BaseNodeVisitor):
54 """Restricts comparision where argument doesn't come first."""
55
56 _allowed_left_nodes: ClassVar[AnyNodes] = (
57 ast.Name,
58 ast.Call,
59 ast.Attribute,
60 )
61
62 _special_cases: ClassVar[AnyNodes] = (
63 ast.In,
64 ast.NotIn,
65 )
66
67 def _is_special_case(self, node: ast.Compare) -> bool:
68 """
69 Operators ``in`` and ``not in`` are special cases.
70
71 Why? Because it is perfectly fine to use something like:
72 ``if 'key' in some_dict: ...``
73 This should not be an issue.
74
75 When there are multiple special operators it is still a separate issue.
76 """
77 return isinstance(node.ops[0], self._special_cases)
78
79 def _is_left_node_valid(self, left: ast.AST) -> bool:
80 if isinstance(left, self._allowed_left_nodes):
81 return True
82 if isinstance(left, ast.BinOp):
83 return (
84 self._is_left_node_valid(left.left) or
85 self._is_left_node_valid(left.right)
86 )
87 return False
88
89 def _has_wrong_nodes_on_the_right(
90 self,
91 comparators: Sequence[ast.AST],
92 ) -> bool:
93 for right in comparators:
94 if isinstance(right, self._allowed_left_nodes):
95 return True
96 if isinstance(right, ast.BinOp):
97 return self._has_wrong_nodes_on_the_right([
98 right.left, right.right,
99 ])
100 return False
101
102 def _check_ordering(self, node: ast.Compare) -> None:
103 if self._is_left_node_valid(node.left):
104 return
105
106 if self._is_special_case(node):
107 return
108
109 if len(node.comparators) > 1:
110 return
111
112 if not self._has_wrong_nodes_on_the_right(node.comparators):
113 return
114
115 self.add_violation(ComparisonOrderViolation(node))
116
117 def visit_Compare(self, node: ast.Compare) -> None:
118 """
119 Forbids comparision where argument doesn't come first.
120
121 Raises:
122 ComparisonOrderViolation
123
124 """
125 self._check_ordering(node)
126 self.generic_visit(node)
127
128
129 # TODO(@sobolevn): refactor to be a single visitor
130 class MultipleInVisitor(BaseNodeVisitor):
131 """Restricts comparision where multiple `in`s are used."""
132
133 def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:
134 count = 0
135 for op in node.ops:
136 if isinstance(op, ast.In):
137 count += 1
138 return count > 1
139
140 def _count_in_comparisons(self, node: ast.Compare) -> None:
141 if self._has_multiple_in_comparisons(node):
142 self.add_violation(MultipleInComparisonViolation(node))
143
144 def visit_Compare(self, node: ast.Compare) -> None:
145 """
146 Forbids comparisons including multiple 'in's in a statement.
147
148 Raise:
149 MultipleInComparisonViolation
150
151 """
152 self._count_in_comparisons(node)
153 self.generic_visit(node)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:
diff --git a/wemake_python_styleguide/visitors/ast/comparisons.py b/wemake_python_styleguide/visitors/ast/comparisons.py
--- a/wemake_python_styleguide/visitors/ast/comparisons.py
+++ b/wemake_python_styleguide/visitors/ast/comparisons.py
@@ -151,3 +151,32 @@
"""
self._count_in_comparisons(node)
self.generic_visit(node)
+
+
+class RedundantComparisonVisitor(BaseNodeVisitor):
+ """Restricts the comparison where always same result."""
+
+ def visit_Compare(self, node: ast.Compare) -> None:
+ """
+ Ensures that compares are not for same variable.
+
+ Raises:
+ ConstantComparisonViolation
+
+ """
+ self._check_redundant_compare(node)
+ self.generic_visit(node)
+
+ def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:
+ if isinstance(left, ast.Name) and isinstance(right, ast.Name):
+ if left.id is right.id:
+ return True
+ return False
+
+ def _check_redundant_compare(self, node: ast.Compare) -> None:
+ last_variable = node.left
+ for next_variable in node.comparators:
+ if self._is_same_variable(last_variable, next_variable):
+ self.add_violation(ConstantComparisonViolation(node))
+ break
+ last_variable = next_variable

verification_info:
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/comparisons.py b/wemake_python_styleguide/visitors/ast/comparisons.py\n--- a/wemake_python_styleguide/visitors/ast/comparisons.py\n+++ b/wemake_python_styleguide/visitors/ast/comparisons.py\n@@ -151,3 +151,32 @@\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n+\n+\n+class RedundantComparisonVisitor(BaseNodeVisitor):\n+ \"\"\"Restricts the comparison where always same result.\"\"\"\n+\n+ def visit_Compare(self, node: ast.Compare) -> None:\n+ \"\"\"\n+ Ensures that compares are not for same variable.\n+\n+ Raises:\n+ ConstantComparisonViolation\n+\n+ \"\"\"\n+ self._check_redundant_compare(node)\n+ self.generic_visit(node)\n+\n+ def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:\n+ if isinstance(left, ast.Name) and isinstance(right, ast.Name):\n+ if left.id is right.id:\n+ return True\n+ return False\n+\n+ def _check_redundant_compare(self, node: ast.Compare) -> None:\n+ last_variable = node.left\n+ for next_variable in node.comparators:\n+ if self._is_same_variable(last_variable, next_variable):\n+ self.add_violation(ConstantComparisonViolation(node))\n+ break\n+ last_variable = next_variable\n", "issue": "Forbid to compare the identical variables\n# Rule request\r\n\r\n<!-- Thanks for coming up with a proposal! -->\r\n\r\n## Thesis\r\n\r\nWe need to find cases like this: `if some_value == some_value:`.\r\nWe also need to find other operators: `is`, `is not`, `>`, `<`, `!=`, `>=`, `<=`.\r\n\r\n## Reasoning\r\n\r\nIt clearly does not make any sense. It is a bug in the source code.\r\n\r\nRelated: https://help.semmle.com/wiki/display/PYTHON/Comparison+of+identical+values\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Sequence\n\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations.consistency import (\n ComparisonOrderViolation,\n ConstantComparisonViolation,\n MultipleInComparisonViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass ConstantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison of literals.\"\"\"\n\n def _check_is_literal(self, node: ast.AST) -> bool:\n \"\"\"\n Checks for nodes that contains only constants.\n\n If the node contains only literals it will be evaluted.\n When node relies on some other names, it won't be evaluted.\n \"\"\"\n try:\n ast.literal_eval(node)\n except ValueError:\n return False\n else:\n return True\n\n def _check_literal_compare(self, node: ast.Compare) -> None:\n last_was_literal = self._check_is_literal(node.left)\n for comparator in node.comparators:\n next_is_literal = self._check_is_literal(comparator)\n if last_was_literal and next_is_literal:\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_was_literal = next_is_literal\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are written correctly.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n self._check_literal_compare(node)\n self.generic_visit(node)\n\n\nclass WrongOrderVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where argument doesn't come first.\"\"\"\n\n _allowed_left_nodes: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Call,\n ast.Attribute,\n )\n\n _special_cases: ClassVar[AnyNodes] = (\n ast.In,\n ast.NotIn,\n )\n\n def _is_special_case(self, node: ast.Compare) -> bool:\n \"\"\"\n Operators ``in`` and ``not in`` are special cases.\n\n Why? 
Because it is perfectly fine to use something like:\n ``if 'key' in some_dict: ...``\n This should not be an issue.\n\n When there are multiple special operators it is still a separate issue.\n \"\"\"\n return isinstance(node.ops[0], self._special_cases)\n\n def _is_left_node_valid(self, left: ast.AST) -> bool:\n if isinstance(left, self._allowed_left_nodes):\n return True\n if isinstance(left, ast.BinOp):\n return (\n self._is_left_node_valid(left.left) or\n self._is_left_node_valid(left.right)\n )\n return False\n\n def _has_wrong_nodes_on_the_right(\n self,\n comparators: Sequence[ast.AST],\n ) -> bool:\n for right in comparators:\n if isinstance(right, self._allowed_left_nodes):\n return True\n if isinstance(right, ast.BinOp):\n return self._has_wrong_nodes_on_the_right([\n right.left, right.right,\n ])\n return False\n\n def _check_ordering(self, node: ast.Compare) -> None:\n if self._is_left_node_valid(node.left):\n return\n\n if self._is_special_case(node):\n return\n\n if len(node.comparators) > 1:\n return\n\n if not self._has_wrong_nodes_on_the_right(node.comparators):\n return\n\n self.add_violation(ComparisonOrderViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparision where argument doesn't come first.\n\n Raises:\n ComparisonOrderViolation\n\n \"\"\"\n self._check_ordering(node)\n self.generic_visit(node)\n\n\n# TODO(@sobolevn): refactor to be a single visitor\nclass MultipleInVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where multiple `in`s are used.\"\"\"\n\n def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:\n count = 0\n for op in node.ops:\n if isinstance(op, ast.In):\n count += 1\n return count > 1\n\n def _count_in_comparisons(self, node: ast.Compare) -> None:\n if self._has_multiple_in_comparisons(node):\n self.add_violation(MultipleInComparisonViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparisons including multiple 'in's in a statement.\n\n Raise:\n MultipleInComparisonViolation\n\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/comparisons.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Sequence\n\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations.consistency import (\n ComparisonOrderViolation,\n ConstantComparisonViolation,\n MultipleInComparisonViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass ConstantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison of literals.\"\"\"\n\n def _check_is_literal(self, node: ast.AST) -> bool:\n \"\"\"\n Checks for nodes that contains only constants.\n\n If the node contains only literals it will be evaluted.\n When node relies on some other names, it won't be evaluted.\n \"\"\"\n try:\n ast.literal_eval(node)\n except ValueError:\n return False\n else:\n return True\n\n def _check_literal_compare(self, node: ast.Compare) -> None:\n last_was_literal = self._check_is_literal(node.left)\n for comparator in node.comparators:\n next_is_literal = self._check_is_literal(comparator)\n if last_was_literal and next_is_literal:\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_was_literal = next_is_literal\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are written correctly.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n 
self._check_literal_compare(node)\n self.generic_visit(node)\n\n\nclass WrongOrderVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where argument doesn't come first.\"\"\"\n\n _allowed_left_nodes: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Call,\n ast.Attribute,\n )\n\n _special_cases: ClassVar[AnyNodes] = (\n ast.In,\n ast.NotIn,\n )\n\n def _is_special_case(self, node: ast.Compare) -> bool:\n \"\"\"\n Operators ``in`` and ``not in`` are special cases.\n\n Why? Because it is perfectly fine to use something like:\n ``if 'key' in some_dict: ...``\n This should not be an issue.\n\n When there are multiple special operators it is still a separate issue.\n \"\"\"\n return isinstance(node.ops[0], self._special_cases)\n\n def _is_left_node_valid(self, left: ast.AST) -> bool:\n if isinstance(left, self._allowed_left_nodes):\n return True\n if isinstance(left, ast.BinOp):\n return (\n self._is_left_node_valid(left.left) or\n self._is_left_node_valid(left.right)\n )\n return False\n\n def _has_wrong_nodes_on_the_right(\n self,\n comparators: Sequence[ast.AST],\n ) -> bool:\n for right in comparators:\n if isinstance(right, self._allowed_left_nodes):\n return True\n if isinstance(right, ast.BinOp):\n return self._has_wrong_nodes_on_the_right([\n right.left, right.right,\n ])\n return False\n\n def _check_ordering(self, node: ast.Compare) -> None:\n if self._is_left_node_valid(node.left):\n return\n\n if self._is_special_case(node):\n return\n\n if len(node.comparators) > 1:\n return\n\n if not self._has_wrong_nodes_on_the_right(node.comparators):\n return\n\n self.add_violation(ComparisonOrderViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparision where argument doesn't come first.\n\n Raises:\n ComparisonOrderViolation\n\n \"\"\"\n self._check_ordering(node)\n self.generic_visit(node)\n\n\n# TODO(@sobolevn): refactor to be a single visitor\nclass MultipleInVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where multiple `in`s are used.\"\"\"\n\n def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:\n count = 0\n for op in node.ops:\n if isinstance(op, ast.In):\n count += 1\n return count > 1\n\n def _count_in_comparisons(self, node: ast.Compare) -> None:\n if self._has_multiple_in_comparisons(node):\n self.add_violation(MultipleInComparisonViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparisons including multiple 'in's in a statement.\n\n Raise:\n MultipleInComparisonViolation\n\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n\n\nclass RedundantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison where always same result.\"\"\"\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are not for same variable.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n self._check_redundant_compare(node)\n self.generic_visit(node)\n\n def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:\n if isinstance(left, ast.Name) and isinstance(right, ast.Name):\n if left.id is right.id:\n return True\n return False\n\n def _check_redundant_compare(self, node: ast.Compare) -> None:\n last_variable = node.left\n for next_variable in node.comparators:\n if self._is_same_variable(last_variable, next_variable):\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_variable = next_variable\n", "path": "wemake_python_styleguide/visitors/ast/comparisons.py"}]}

num_tokens: 1,742
num_tokens_diff: 334

problem_id: gh_patches_debug_15473
source: rasdani/github-patches
task_type: git_diff
in_source_id: matrix-org__synapse-5354

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement rooms v5
Synapse v1.0 will default to rooms v4, however the intention is that it also ships with rooms v5 which is envisaged to be rooms v4 + https://github.com/matrix-org/synapse/issues/4364
rooms v5 is yet to be MSC'd so does not formally exist at this point, this issue is a place holder.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/api/room_versions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2019 New Vector Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 import attr
16
17
18 class EventFormatVersions(object):
19 """This is an internal enum for tracking the version of the event format,
20 independently from the room version.
21 """
22 V1 = 1 # $id:server event id format
23 V2 = 2 # MSC1659-style $hash event id format: introduced for room v3
24 V3 = 3 # MSC1884-style $hash format: introduced for room v4
25
26
27 KNOWN_EVENT_FORMAT_VERSIONS = {
28 EventFormatVersions.V1,
29 EventFormatVersions.V2,
30 EventFormatVersions.V3,
31 }
32
33
34 class StateResolutionVersions(object):
35 """Enum to identify the state resolution algorithms"""
36 V1 = 1 # room v1 state res
37 V2 = 2 # MSC1442 state res: room v2 and later
38
39
40 class RoomDisposition(object):
41 STABLE = "stable"
42 UNSTABLE = "unstable"
43
44
45 @attr.s(slots=True, frozen=True)
46 class RoomVersion(object):
47 """An object which describes the unique attributes of a room version."""
48
49 identifier = attr.ib() # str; the identifier for this version
50 disposition = attr.ib() # str; one of the RoomDispositions
51 event_format = attr.ib() # int; one of the EventFormatVersions
52 state_res = attr.ib() # int; one of the StateResolutionVersions
53 enforce_key_validity = attr.ib() # bool
54
55
56 class RoomVersions(object):
57 V1 = RoomVersion(
58 "1",
59 RoomDisposition.STABLE,
60 EventFormatVersions.V1,
61 StateResolutionVersions.V1,
62 enforce_key_validity=False,
63 )
64 V2 = RoomVersion(
65 "2",
66 RoomDisposition.STABLE,
67 EventFormatVersions.V1,
68 StateResolutionVersions.V2,
69 enforce_key_validity=False,
70 )
71 V3 = RoomVersion(
72 "3",
73 RoomDisposition.STABLE,
74 EventFormatVersions.V2,
75 StateResolutionVersions.V2,
76 enforce_key_validity=False,
77 )
78 V4 = RoomVersion(
79 "4",
80 RoomDisposition.STABLE,
81 EventFormatVersions.V3,
82 StateResolutionVersions.V2,
83 enforce_key_validity=False,
84 )
85 VDH_TEST_KEY_VALIDITY = RoomVersion(
86 "vdh-test-key-validity",
87 RoomDisposition.UNSTABLE,
88 EventFormatVersions.V3,
89 StateResolutionVersions.V2,
90 enforce_key_validity=False,
91 )
92
93
94 KNOWN_ROOM_VERSIONS = {
95 v.identifier: v for v in (
96 RoomVersions.V1,
97 RoomVersions.V2,
98 RoomVersions.V3,
99 RoomVersions.V4,
100 RoomVersions.VDH_TEST_KEY_VALIDITY,
101 )
102 } # type: dict[str, RoomVersion]
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -82,12 +82,12 @@
StateResolutionVersions.V2,
enforce_key_validity=False,
)
- VDH_TEST_KEY_VALIDITY = RoomVersion(
- "vdh-test-key-validity",
- RoomDisposition.UNSTABLE,
+ V5 = RoomVersion(
+ "5",
+ RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
- enforce_key_validity=False,
+ enforce_key_validity=True,
)
@@ -97,6 +97,6 @@
RoomVersions.V2,
RoomVersions.V3,
RoomVersions.V4,
- RoomVersions.VDH_TEST_KEY_VALIDITY,
+ RoomVersions.V5,
)
} # type: dict[str, RoomVersion]

verification_info:
{"golden_diff": "diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py\n--- a/synapse/api/room_versions.py\n+++ b/synapse/api/room_versions.py\n@@ -82,12 +82,12 @@\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n- VDH_TEST_KEY_VALIDITY = RoomVersion(\n- \"vdh-test-key-validity\",\n- RoomDisposition.UNSTABLE,\n+ V5 = RoomVersion(\n+ \"5\",\n+ RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n- enforce_key_validity=False,\n+ enforce_key_validity=True,\n )\n \n \n@@ -97,6 +97,6 @@\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n- RoomVersions.VDH_TEST_KEY_VALIDITY,\n+ RoomVersions.V5,\n )\n } # type: dict[str, RoomVersion]\n", "issue": "Implement rooms v5\nSynapse v1.0 will default to rooms v4, however the intention is that it also ships with rooms v5 which is envisaged to be rooms v4 + https://github.com/matrix-org/synapse/issues/4364\r\n\r\nrooms v5 is yet to be MSC'd so does not formally exist at this point, this issue is a place holder.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n VDH_TEST_KEY_VALIDITY = RoomVersion(\n \"vdh-test-key-validity\",\n 
RoomDisposition.UNSTABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.VDH_TEST_KEY_VALIDITY,\n )\n} # type: dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V5 = RoomVersion(\n \"5\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.V5,\n )\n} # type: dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}]}
| 1,290 | 218 |
gh_patches_debug_2853 | rasdani/github-patches | git_diff | getsentry__sentry-python-1545 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in setup.py for 1.9.1 release breaks `conda export`
### How do you use Sentry?
Self-hosted/on-premise
### Version
1.9.1
### Steps to Reproduce
This line
https://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43
`'urllib3>=1.26.11"; python_version >="3.6"',`
Has a stray `"` after the urllib3 version number.
This passes by pipi but confuses conda, causing a very opaque error message when you try to run `conda export`:
```
InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']
```
Steps to reproduce:
```
conda create -n test-sentry-sdk python=3.8
conda activate test-sentry-sdk
pip install sentry-sdk==1.9.1
conda export
```
### Expected Result
Expect to get a conda environment yaml spec.
### Actual Result
```
InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Sentry-Python - Sentry SDK for Python
5 =====================================
6
7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub
8 <https://github.com/getsentry/sentry-python>`_ to find out more.
9 """
10
11 import os
12 from setuptools import setup, find_packages
13
14 here = os.path.abspath(os.path.dirname(__file__))
15
16
17 def get_file_text(file_name):
18 with open(os.path.join(here, file_name)) as in_file:
19 return in_file.read()
20
21
22 setup(
23 name="sentry-sdk",
24 version="1.9.1",
25 author="Sentry Team and Contributors",
26 author_email="[email protected]",
27 url="https://github.com/getsentry/sentry-python",
28 project_urls={
29 "Documentation": "https://docs.sentry.io/platforms/python/",
30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md",
31 },
32 description="Python client for Sentry (https://sentry.io)",
33 long_description=get_file_text("README.md"),
34 long_description_content_type="text/markdown",
35 packages=find_packages(exclude=("tests", "tests.*")),
36 # PEP 561
37 package_data={"sentry_sdk": ["py.typed"]},
38 zip_safe=False,
39 license="BSD",
40 install_requires=[
41 'urllib3>=1.25.7; python_version<="3.4"',
42 'urllib3>=1.26.9; python_version>="3.5"',
43 'urllib3>=1.26.11"; python_version >="3.6"',
44 "certifi",
45 ],
46 extras_require={
47 "flask": ["flask>=0.11", "blinker>=1.1"],
48 "quart": ["quart>=0.16.1", "blinker>=1.1"],
49 "bottle": ["bottle>=0.12.13"],
50 "falcon": ["falcon>=1.4"],
51 "django": ["django>=1.8"],
52 "sanic": ["sanic>=0.8"],
53 "celery": ["celery>=3"],
54 "beam": ["apache-beam>=2.12"],
55 "rq": ["rq>=0.6"],
56 "aiohttp": ["aiohttp>=3.5"],
57 "tornado": ["tornado>=5"],
58 "sqlalchemy": ["sqlalchemy>=1.2"],
59 "pyspark": ["pyspark>=2.4.4"],
60 "pure_eval": ["pure_eval", "executing", "asttokens"],
61 "chalice": ["chalice>=1.16.0"],
62 "httpx": ["httpx>=0.16.0"],
63 "starlette": ["starlette>=0.19.1"],
64 "fastapi": ["fastapi>=0.79.0"],
65 },
66 classifiers=[
67 "Development Status :: 5 - Production/Stable",
68 "Environment :: Web Environment",
69 "Intended Audience :: Developers",
70 "License :: OSI Approved :: BSD License",
71 "Operating System :: OS Independent",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.4",
77 "Programming Language :: Python :: 3.5",
78 "Programming Language :: Python :: 3.6",
79 "Programming Language :: Python :: 3.7",
80 "Programming Language :: Python :: 3.8",
81 "Programming Language :: Python :: 3.9",
82 "Programming Language :: Python :: 3.10",
83 "Topic :: Software Development :: Libraries :: Python Modules",
84 ],
85 options={"bdist_wheel": {"universal": "1"}},
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@
install_requires=[
'urllib3>=1.25.7; python_version<="3.4"',
'urllib3>=1.26.9; python_version>="3.5"',
- 'urllib3>=1.26.11"; python_version >="3.6"',
+ 'urllib3>=1.26.11; python_version >="3.6"',
"certifi",
],
extras_require={
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n- 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n+ 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n", "issue": "Typo in setup.py for 1.9.1 release breaks `conda export`\n### How do you use Sentry?\n\nSelf-hosted/on-premise\n\n### Version\n\n1.9.1\n\n### Steps to Reproduce\n\nThis line \r\nhttps://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43\r\n\r\n`'urllib3>=1.26.11\"; python_version >=\"3.6\"',`\r\n\r\nHas a stray `\"` after the urllib3 version number.\r\n\r\nThis passes by pipi but confuses conda, causing a very opaque error message when you try to run `conda export`:\r\n```\r\nInvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']\r\n```\r\n\r\nSteps to reproduce:\r\n```\r\nconda create -n test-sentry-sdk python=3.8\r\nconda activate test-sentry-sdk\r\npip install sentry-sdk==1.9.1\r\nconda export\r\n```\n\n### Expected Result\n\nExpect to get a conda environment yaml spec.\n\n### Actual Result\n\n```\r\nInvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development 
Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n 
\"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]}
| 1,609 | 135 |
gh_patches_debug_34016 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2044 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
File upload for Indicator Period Data requires Django login
When uploading a photo/file to the /rest/v1/indicator_period_data/nnn/upload_file/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_period_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment
9
10 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,
11 IndicatorPeriodDataCommentSerializer)
12 from ..viewsets import PublicProjectViewSet
13
14 from rest_framework.decorators import api_view, permission_classes
15 from rest_framework.permissions import IsAuthenticated
16 from rest_framework.response import Response
17
18
19 class IndicatorPeriodDataViewSet(PublicProjectViewSet):
20 """
21 """
22 queryset = IndicatorPeriodData.objects.all()
23 serializer_class = IndicatorPeriodDataSerializer
24 filter_fields = {
25 'period': ['exact'],
26 'period__indicator': ['exact'],
27 'period__indicator__result': ['exact'],
28 'period__indicator__result__project': ['exact'],
29 'user': ['exact'],
30 'status': ['exact'],
31 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
32 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
33 'relative_data': ['exact'],
34 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],
35 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],
36 'update_method': ['exact']
37 }
38
39 project_relation = 'period__indicator__result__project__'
40
41
42 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
43 """
44 """
45 queryset = IndicatorPeriodData.objects.all()
46 serializer_class = IndicatorPeriodDataFrameworkSerializer
47 filter_fields = {
48 'period': ['exact'],
49 'period__indicator': ['exact'],
50 'period__indicator__result': ['exact'],
51 'period__indicator__result__project': ['exact'],
52 'user': ['exact'],
53 'status': ['exact'],
54 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
55 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
56 'relative_data': ['exact'],
57 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],
58 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],
59 'update_method': ['exact']
60 }
61 project_relation = 'period__indicator__result__project__'
62
63
64 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):
65 """
66 """
67 queryset = IndicatorPeriodDataComment.objects.all()
68 serializer_class = IndicatorPeriodDataCommentSerializer
69 filter_fields = {
70 'data': ['exact'],
71 'data__period': ['exact'],
72 'data__period__indicator': ['exact'],
73 'data__period__indicator__result': ['exact'],
74 'data__period__indicator__result__project': ['exact'],
75 'user': ['exact'],
76 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
77 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]
78 }
79 project_relation = 'period__indicator__result__project__'
80
81
82 @api_view(['POST'])
83 @permission_classes((IsAuthenticated, ))
84 def indicator_upload_file(request, pk=None):
85 """
86 Special API call for directly uploading a file.
87
88 :param request; A Django request object.
89 :param pk; The primary key of an IndicatorPeriodData instance.
90 """
91 update = IndicatorPeriodData.objects.get(pk=pk)
92 upload_file = request.FILES['file']
93
94 # TODO: Permissions
95 # user = request.user
96
97 file_type = request.POST.copy()['type']
98 if file_type == 'photo':
99 update.photo = upload_file
100 update.save(update_fields=['photo'])
101 return Response({'file': update.photo.url})
102 elif file_type == 'file':
103 update.file = upload_file
104 update.save(update_fields=['file'])
105 return Response({'file': update.file.url})
106
107 # TODO: Error response
108 return Response({})
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py
--- a/akvo/rest/views/indicator_period_data.py
+++ b/akvo/rest/views/indicator_period_data.py
@@ -11,8 +11,10 @@
IndicatorPeriodDataCommentSerializer)
from ..viewsets import PublicProjectViewSet
+from django.http import HttpResponseForbidden
+
+from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
-from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
@@ -80,7 +82,6 @@
@api_view(['POST'])
-@permission_classes((IsAuthenticated, ))
def indicator_upload_file(request, pk=None):
"""
Special API call for directly uploading a file.
@@ -91,18 +92,24 @@
update = IndicatorPeriodData.objects.get(pk=pk)
upload_file = request.FILES['file']
- # TODO: Permissions
- # user = request.user
-
- file_type = request.POST.copy()['type']
- if file_type == 'photo':
- update.photo = upload_file
- update.save(update_fields=['photo'])
- return Response({'file': update.photo.url})
- elif file_type == 'file':
- update.file = upload_file
- update.save(update_fields=['file'])
- return Response({'file': update.file.url})
-
- # TODO: Error response
- return Response({})
+ # Permissions
+ user = getattr(request, 'user', None)
+ if not user:
+ return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)
+
+ if not user.has_perm('rsr.change_project', update.period.indicator.result.project):
+ return Response({'error': 'User has no permission to place an update'},
+ status=status.HTTP_403_FORBIDDEN)
+
+ try:
+ file_type = request.POST.copy()['type']
+ if file_type == 'photo':
+ update.photo = upload_file
+ update.save(update_fields=['photo'])
+ return Response({'file': update.photo.url})
+ elif file_type == 'file':
+ update.file = upload_file
+ update.save(update_fields=['file'])
+ return Response({'file': update.file.url})
+ except Exception as e:
+ return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
|
{"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -11,8 +11,10 @@\n IndicatorPeriodDataCommentSerializer)\n from ..viewsets import PublicProjectViewSet\n \n+from django.http import HttpResponseForbidden\n+\n+from rest_framework import status\n from rest_framework.decorators import api_view, permission_classes\n-from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n \n \n@@ -80,7 +82,6 @@\n \n \n @api_view(['POST'])\n-@permission_classes((IsAuthenticated, ))\n def indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n@@ -91,18 +92,24 @@\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n \n- # TODO: Permissions\n- # user = request.user\n-\n- file_type = request.POST.copy()['type']\n- if file_type == 'photo':\n- update.photo = upload_file\n- update.save(update_fields=['photo'])\n- return Response({'file': update.photo.url})\n- elif file_type == 'file':\n- update.file = upload_file\n- update.save(update_fields=['file'])\n- return Response({'file': update.file.url})\n-\n- # TODO: Error response\n- return Response({})\n+ # Permissions\n+ user = getattr(request, 'user', None)\n+ if not user:\n+ return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n+\n+ if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n+ return Response({'error': 'User has no permission to place an update'},\n+ status=status.HTTP_403_FORBIDDEN)\n+\n+ try:\n+ file_type = request.POST.copy()['type']\n+ if file_type == 'photo':\n+ update.photo = upload_file\n+ update.save(update_fields=['photo'])\n+ return Response({'file': update.photo.url})\n+ elif file_type == 'file':\n+ update.file = upload_file\n+ update.save(update_fields=['file'])\n+ return Response({'file': update.file.url})\n+ except Exception as e:\n+ return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "issue": "File upload for Indicator Period Data requires Django login\nWhen uploading a photo/file to the /rest/v1/indicator_period_data/nnn/upload_file/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': 
['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n filter_fields = {\n 'data': ['exact'],\n 'data__period': ['exact'],\n 'data__period__indicator': ['exact'],\n 'data__period__indicator__result': ['exact'],\n 'data__period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]\n }\n project_relation = 'period__indicator__result__project__'\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n\n # TODO: Permissions\n # user = request.user\n\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n\n # TODO: Error response\n return Response({})\n", "path": "akvo/rest/views/indicator_period_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = 
IndicatorPeriodDataSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n filter_fields = {\n 'data': ['exact'],\n 'data__period': ['exact'],\n 'data__period__indicator': ['exact'],\n 'data__period__indicator__result': ['exact'],\n 'data__period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]\n }\n project_relation = 'period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n return Response({'error': 'User has no permission to place an update'},\n status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}]}
| 1,461 | 539 |
gh_patches_debug_65373 | rasdani/github-patches | git_diff | TheAlgorithms__Python-11276 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Global variable should not using in class method
### Repository commit
9caf4784aada17dc75348f77cc8c356df503c0f3
### Python version (python --version)
python3.11
### Dependencies version (pip freeze)
#nop
### Expected behavior
Hi, guys. I come across one global variable that used in class method. I think using instance attribute would be better !
```python
def show_data(self):
show_list = []
for i in range(1, N + 1): ==> should using self.N instead of N !
show_list += [self.query(i, i)]
print(show_list)
```
[source](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py#L101)
### Actual behavior
```python
def show_data(self):
show_list = []
for i in range(1, self.N + 1):
show_list += [self.query(i, i)]
print(show_list)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `data_structures/binary_tree/segment_tree.py`
Content:
```
1 import math
2
3
4 class SegmentTree:
5 def __init__(self, a):
6 self.A = a
7 self.N = len(self.A)
8 self.st = [0] * (
9 4 * self.N
10 ) # approximate the overall size of segment tree with array N
11 if self.N:
12 self.build(1, 0, self.N - 1)
13
14 def left(self, idx):
15 """
16 Returns the left child index for a given index in a binary tree.
17
18 >>> s = SegmentTree([1, 2, 3])
19 >>> s.left(1)
20 2
21 >>> s.left(2)
22 4
23 """
24 return idx * 2
25
26 def right(self, idx):
27 """
28 Returns the right child index for a given index in a binary tree.
29
30 >>> s = SegmentTree([1, 2, 3])
31 >>> s.right(1)
32 3
33 >>> s.right(2)
34 5
35 """
36 return idx * 2 + 1
37
38 def build(self, idx, l, r): # noqa: E741
39 if l == r:
40 self.st[idx] = self.A[l]
41 else:
42 mid = (l + r) // 2
43 self.build(self.left(idx), l, mid)
44 self.build(self.right(idx), mid + 1, r)
45 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
46
47 def update(self, a, b, val):
48 """
49 Update the values in the segment tree in the range [a,b] with the given value.
50
51 >>> s = SegmentTree([1, 2, 3, 4, 5])
52 >>> s.update(2, 4, 10)
53 True
54 >>> s.query(1, 5)
55 10
56 """
57 return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
58
59 def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
60 """
61 update(1, 1, N, a, b, v) for update val v to [a,b]
62 """
63 if r < a or l > b:
64 return True
65 if l == r:
66 self.st[idx] = val
67 return True
68 mid = (l + r) // 2
69 self.update_recursive(self.left(idx), l, mid, a, b, val)
70 self.update_recursive(self.right(idx), mid + 1, r, a, b, val)
71 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
72 return True
73
74 def query(self, a, b):
75 """
76 Query the maximum value in the range [a,b].
77
78 >>> s = SegmentTree([1, 2, 3, 4, 5])
79 >>> s.query(1, 3)
80 3
81 >>> s.query(1, 5)
82 5
83 """
84 return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
85
86 def query_recursive(self, idx, l, r, a, b): # noqa: E741
87 """
88 query(1, 1, N, a, b) for query max of [a,b]
89 """
90 if r < a or l > b:
91 return -math.inf
92 if l >= a and r <= b:
93 return self.st[idx]
94 mid = (l + r) // 2
95 q1 = self.query_recursive(self.left(idx), l, mid, a, b)
96 q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)
97 return max(q1, q2)
98
99 def show_data(self):
100 show_list = []
101 for i in range(1, N + 1):
102 show_list += [self.query(i, i)]
103 print(show_list)
104
105
106 if __name__ == "__main__":
107 A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
108 N = 15
109 segt = SegmentTree(A)
110 print(segt.query(4, 6))
111 print(segt.query(7, 11))
112 print(segt.query(7, 12))
113 segt.update(1, 3, 111)
114 print(segt.query(1, 15))
115 segt.update(7, 8, 235)
116 segt.show_data()
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py
--- a/data_structures/binary_tree/segment_tree.py
+++ b/data_structures/binary_tree/segment_tree.py
@@ -98,7 +98,7 @@
def show_data(self):
show_list = []
- for i in range(1, N + 1):
+ for i in range(1, self.N + 1):
show_list += [self.query(i, i)]
print(show_list)
|
{"golden_diff": "diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py\n--- a/data_structures/binary_tree/segment_tree.py\n+++ b/data_structures/binary_tree/segment_tree.py\n@@ -98,7 +98,7 @@\n \n def show_data(self):\n show_list = []\n- for i in range(1, N + 1):\n+ for i in range(1, self.N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n", "issue": "Global variable should not using in class method\n### Repository commit\n\n9caf4784aada17dc75348f77cc8c356df503c0f3\n\n### Python version (python --version)\n\npython3.11\n\n### Dependencies version (pip freeze)\n\n#nop \n\n### Expected behavior\n\nHi, guys. I come across one global variable that used in class method. I think using instance attribute would be better !\r\n```python\r\n def show_data(self):\r\n show_list = []\r\n for i in range(1, N + 1): ==> should using self.N instead of N !\r\n show_list += [self.query(i, i)]\r\n print(show_list)\r\n```\r\n[source](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py#L101)\r\n\r\n\n\n### Actual behavior\n\n```python\r\n def show_data(self):\r\n show_list = []\r\n for i in range(1, self.N + 1): \r\n show_list += [self.query(i, i)]\r\n print(show_list)\r\n```\n", "before_files": [{"content": "import math\n\n\nclass SegmentTree:\n def __init__(self, a):\n self.A = a\n self.N = len(self.A)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n if self.N:\n self.build(1, 0, self.N - 1)\n\n def left(self, idx):\n \"\"\"\n Returns the left child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.left(1)\n 2\n >>> s.left(2)\n 4\n \"\"\"\n return idx * 2\n\n def right(self, idx):\n \"\"\"\n Returns the right child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.right(1)\n 3\n >>> s.right(2)\n 5\n \"\"\"\n return idx * 2 + 1\n\n def build(self, idx, l, r): # noqa: E741\n if l == r:\n self.st[idx] = self.A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n self.build(self.right(idx), mid + 1, r)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n\n def update(self, a, b, val):\n \"\"\"\n Update the values in the segment tree in the range [a,b] with the given value.\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.update(2, 4, 10)\n True\n >>> s.query(1, 5)\n 10\n \"\"\"\n return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n\n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n \"\"\"\n update(1, 1, N, a, b, v) for update val v to [a,b]\n \"\"\"\n if r < a or l > b:\n return True\n if l == r:\n self.st[idx] = val\n return True\n mid = (l + r) // 2\n self.update_recursive(self.left(idx), l, mid, a, b, val)\n self.update_recursive(self.right(idx), mid + 1, r, a, b, val)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n return True\n\n def query(self, a, b):\n \"\"\"\n Query the maximum value in the range [a,b].\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.query(1, 3)\n 3\n >>> s.query(1, 5)\n 5\n \"\"\"\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n\n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n \"\"\"\n query(1, 1, N, a, b) for query max of [a,b]\n \"\"\"\n if r < a or l > b:\n return -math.inf\n if l >= a and r <= b:\n return self.st[idx]\n mid = (l + r) // 2\n q1 = self.query_recursive(self.left(idx), l, mid, a, b)\n q2 = self.query_recursive(self.right(idx), 
mid + 1, r, a, b)\n return max(q1, q2)\n\n def show_data(self):\n show_list = []\n for i in range(1, N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]\n N = 15\n segt = SegmentTree(A)\n print(segt.query(4, 6))\n print(segt.query(7, 11))\n print(segt.query(7, 12))\n segt.update(1, 3, 111)\n print(segt.query(1, 15))\n segt.update(7, 8, 235)\n segt.show_data()\n", "path": "data_structures/binary_tree/segment_tree.py"}], "after_files": [{"content": "import math\n\n\nclass SegmentTree:\n def __init__(self, a):\n self.A = a\n self.N = len(self.A)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n if self.N:\n self.build(1, 0, self.N - 1)\n\n def left(self, idx):\n \"\"\"\n Returns the left child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.left(1)\n 2\n >>> s.left(2)\n 4\n \"\"\"\n return idx * 2\n\n def right(self, idx):\n \"\"\"\n Returns the right child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.right(1)\n 3\n >>> s.right(2)\n 5\n \"\"\"\n return idx * 2 + 1\n\n def build(self, idx, l, r): # noqa: E741\n if l == r:\n self.st[idx] = self.A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n self.build(self.right(idx), mid + 1, r)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n\n def update(self, a, b, val):\n \"\"\"\n Update the values in the segment tree in the range [a,b] with the given value.\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.update(2, 4, 10)\n True\n >>> s.query(1, 5)\n 10\n \"\"\"\n return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n\n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n \"\"\"\n update(1, 1, N, a, b, v) for update val v to [a,b]\n \"\"\"\n if r < a or l > b:\n return True\n if l == r:\n self.st[idx] = val\n return True\n mid = (l + r) // 2\n self.update_recursive(self.left(idx), l, mid, a, b, val)\n self.update_recursive(self.right(idx), mid + 1, r, a, b, val)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n return True\n\n def query(self, a, b):\n \"\"\"\n Query the maximum value in the range [a,b].\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.query(1, 3)\n 3\n >>> s.query(1, 5)\n 5\n \"\"\"\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n\n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n \"\"\"\n query(1, 1, N, a, b) for query max of [a,b]\n \"\"\"\n if r < a or l > b:\n return -math.inf\n if l >= a and r <= b:\n return self.st[idx]\n mid = (l + r) // 2\n q1 = self.query_recursive(self.left(idx), l, mid, a, b)\n q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)\n return max(q1, q2)\n\n def show_data(self):\n show_list = []\n for i in range(1, self.N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]\n N = 15\n segt = SegmentTree(A)\n print(segt.query(4, 6))\n print(segt.query(7, 11))\n print(segt.query(7, 12))\n segt.update(1, 3, 111)\n print(segt.query(1, 15))\n segt.update(7, 8, 235)\n segt.show_data()\n", "path": "data_structures/binary_tree/segment_tree.py"}]}
| 1,804 | 121 |
gh_patches_debug_896 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1228 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
boto3 is fixed at the patch level version
**Is your feature request related to a problem? Please describe.**
free boto3 dependency patch version.
**Describe the solution you'd like**
replace the line here:
https://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29
with
```
boto3 = "==1.17.*"
```
**Describe alternatives you've considered**
there are no alternatives as the patch version i don't see why is locked.
it can cause conflicts with already installed boto3 library
**Additional context**
boto3 dependency install latest patch version
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
Fixes #1211
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit"
32 ]
33 },
34 install_requires=[
35 "bc-python-hcl2>=0.3.18",
36 "cloudsplaining>=0.4.1",
37 "deep_merge",
38 "tabulate",
39 "colorama",
40 "termcolor",
41 "junit-xml",
42 "dpath>=1.5.0,<2",
43 "pyyaml>=5.4.1",
44 "boto3==1.17.27",
45 "GitPython",
46 "six==1.15.0",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker"
55 ],
56 license="Apache License 2.0",
57 name="checkov",
58 version=version,
59 python_requires=">=3.7",
60 description="Infrastructure as code static analysis",
61 author="bridgecrew",
62 author_email="[email protected]",
63 url="https://github.com/nimrodkor/checkov",
64 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
65 include_package_data=True,
66 package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},
67 package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},
68 scripts=["bin/checkov", "bin/checkov.cmd"],
69 long_description=long_description,
70 long_description_content_type="text/markdown",
71 classifiers=[
72 'Environment :: Console',
73 'Intended Audience :: Developers',
74 'Intended Audience :: System Administrators',
75 'Programming Language :: Python :: 3.7',
76 'Programming Language :: Python :: 3.8',
77 'Programming Language :: Python :: 3.9',
78 'Topic :: Security',
79 'Topic :: Software Development :: Build Tools'
80 ]
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
"junit-xml",
"dpath>=1.5.0,<2",
"pyyaml>=5.4.1",
- "boto3==1.17.27",
+ "boto3==1.17.*",
"GitPython",
"six==1.15.0",
"jmespath",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n- \"boto3==1.17.27\",\n+ \"boto3==1.17.*\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n", "issue": "boto3 is fixed at the patch level version\n**Is your feature request related to a problem? Please describe.**\r\nfree boto3 dependency patch version.\r\n\r\n**Describe the solution you'd like**\r\nreplace the line here:\r\nhttps://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29\r\n\r\nwith \r\n```\r\nboto3 = \"==1.17.*\"\r\n```\r\n**Describe alternatives you've considered**\r\nthere are no alternatives as the patch version i don't see why is locked.\r\nit can cause conflicts with already installed boto3 library \r\n\r\n**Additional context**\r\n\r\n\nboto3 dependency install latest patch version\nBy submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.\r\n\r\nFixes #1211 \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.27\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n include_package_data=True,\n package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},\n package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read 
the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.*\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n include_package_data=True,\n package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},\n package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]}
| 1,192 | 113 |
gh_patches_debug_6047 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2151 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing #1824 wrong wording
Dashboard/Newsletter: the Word "Empfänger*innen" is at the wrong place. Please delete it above and replace "Projekt" by "Empfänger*innen sind alle, die diesem Projekt folgen:"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/newsletters/forms.py`
Content:
```
1 from django import forms
2 from django.apps import apps
3 from django.conf import settings
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.projects.models import Project
7
8 from . import models
9
10 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)
11
12
13 class NewsletterForm(forms.ModelForm):
14 class Meta:
15 model = models.Newsletter
16 fields = ['sender_name', 'sender', 'receivers', 'project',
17 'organisation', 'subject', 'body']
18
19 def __init__(self, user=None, organisation=None, *args, **kwargs):
20 super().__init__(*args, **kwargs)
21 choices = [(value, string)
22 for value, string in models.RECEIVER_CHOICES
23 if value != models.PLATFORM or (user and user.is_superuser)]
24 self.fields['receivers'] = forms.ChoiceField(
25 label=_('Receivers'),
26 choices=choices,
27 widget=forms.RadioSelect(),
28 )
29
30 project_qs = Project.objects
31 if organisation:
32 project_qs = Project.objects.filter(organisation=organisation.id)
33
34 self.fields['project'] = forms.ModelChoiceField(
35 label=_('Project'),
36 queryset=project_qs,
37 required=False, empty_label=None)
38
39 self.fields['organisation'] = forms.ModelChoiceField(
40 label=_('Organisation'),
41 queryset=Organisation.objects,
42 required=False, empty_label=None)
43
44 def clean(self):
45 cleaned_data = super().clean()
46 if cleaned_data.get('receivers') == str(models.PROJECT) and \
47 not cleaned_data.get('project'):
48 self.add_error('project', _('Select a Project'))
49
50
51 class RestrictedNewsletterForm(forms.ModelForm):
52
53 class Meta:
54 model = models.Newsletter
55 fields = ['sender_name', 'sender', 'project', 'receivers',
56 'organisation', 'subject', 'body']
57
58 def __init__(self, user=None, organisation=None, *args, **kwargs):
59 super().__init__(*args, **kwargs)
60
61 self.fields['receivers'].widget = forms.HiddenInput()
62
63 project_qs = Project.objects
64 if organisation:
65 project_qs = Project.objects.filter(organisation=organisation.id)
66 if user and not user.is_superuser:
67 user_groups = user.groups.all()
68 org_groups = organisation.groups.all()
69 shared_groups = user_groups & org_groups
70 group = shared_groups.distinct().first()
71 project_qs = project_qs.filter(group=group)
72
73 self.fields['project'] = forms.ModelChoiceField(
74 label=_('Project'),
75 queryset=project_qs,
76 required=False, empty_label=None)
77
78 self.fields['organisation'] = forms.ModelChoiceField(
79 label=_('Organisation'),
80 queryset=Organisation.objects,
81 required=False, empty_label=None)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/newsletters/forms.py b/meinberlin/apps/newsletters/forms.py
--- a/meinberlin/apps/newsletters/forms.py
+++ b/meinberlin/apps/newsletters/forms.py
@@ -74,6 +74,8 @@
label=_('Project'),
queryset=project_qs,
required=False, empty_label=None)
+ self.fields['project'].label = _('Receivers are all users '
+ 'which follow the following project:')
self.fields['organisation'] = forms.ModelChoiceField(
label=_('Organisation'),
|
{"golden_diff": "diff --git a/meinberlin/apps/newsletters/forms.py b/meinberlin/apps/newsletters/forms.py\n--- a/meinberlin/apps/newsletters/forms.py\n+++ b/meinberlin/apps/newsletters/forms.py\n@@ -74,6 +74,8 @@\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n+ self.fields['project'].label = _('Receivers are all users '\n+ 'which follow the following project:')\n \n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n", "issue": "testing #1824 wrong wording\nDashboard/Newsletter: the Word \"Empf\u00e4nger*innen\" is at the wrong place. Please delete it above and replace \"Projekt\" by \"Empf\u00e4nger*innen sind alle, die diesem Projekt folgen:\"\r\n \r\n\n", "before_files": [{"content": "from django import forms\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import models\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass NewsletterForm(forms.ModelForm):\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'receivers', 'project',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = [(value, string)\n for value, string in models.RECEIVER_CHOICES\n if value != models.PLATFORM or (user and user.is_superuser)]\n self.fields['receivers'] = forms.ChoiceField(\n label=_('Receivers'),\n choices=choices,\n widget=forms.RadioSelect(),\n )\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data.get('receivers') == str(models.PROJECT) and \\\n not cleaned_data.get('project'):\n self.add_error('project', _('Select a Project'))\n\n\nclass RestrictedNewsletterForm(forms.ModelForm):\n\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'project', 'receivers',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['receivers'].widget = forms.HiddenInput()\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n if user and not user.is_superuser:\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n group = shared_groups.distinct().first()\n project_qs = project_qs.filter(group=group)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n", "path": "meinberlin/apps/newsletters/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects.models import Project\n\nfrom . 
import models\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass NewsletterForm(forms.ModelForm):\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'receivers', 'project',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = [(value, string)\n for value, string in models.RECEIVER_CHOICES\n if value != models.PLATFORM or (user and user.is_superuser)]\n self.fields['receivers'] = forms.ChoiceField(\n label=_('Receivers'),\n choices=choices,\n widget=forms.RadioSelect(),\n )\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data.get('receivers') == str(models.PROJECT) and \\\n not cleaned_data.get('project'):\n self.add_error('project', _('Select a Project'))\n\n\nclass RestrictedNewsletterForm(forms.ModelForm):\n\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'project', 'receivers',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['receivers'].widget = forms.HiddenInput()\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n if user and not user.is_superuser:\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n group = shared_groups.distinct().first()\n project_qs = project_qs.filter(group=group)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n self.fields['project'].label = _('Receivers are all users '\n 'which follow the following project:')\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n", "path": "meinberlin/apps/newsletters/forms.py"}]}
| 1,061 | 124 |
gh_patches_debug_20413
|
rasdani/github-patches
|
git_diff
|
flairNLP__flair-198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generate text with language model
Is there any script that can generate text from a trained language model?
Also, calculating perplexity on test data would be helpful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/language_model.py`
Content:
```
1 import torch.nn as nn
2 import torch
3 import math
4 from torch.autograd import Variable
5 from typing import Dict, List
6 from flair.data import Dictionary
7
8
9 class LanguageModel(nn.Module):
10 """Container module with an encoder, a recurrent module, and a decoder."""
11
12 def __init__(self,
13 dictionary: Dictionary,
14 is_forward_lm: bool,
15 hidden_size: int,
16 nlayers: int,
17 embedding_size: int = 100,
18 nout=None,
19 dropout=0.5):
20
21 super(LanguageModel, self).__init__()
22
23 self.dictionary = dictionary
24 self.is_forward_lm: bool = is_forward_lm
25
26 self.dropout = dropout
27 self.hidden_size = hidden_size
28 self.embedding_size = embedding_size
29 self.nlayers = nlayers
30
31 self.drop = nn.Dropout(dropout)
32 self.encoder = nn.Embedding(len(dictionary), embedding_size)
33
34 if nlayers == 1:
35 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)
36 else:
37 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)
38
39 self.hidden = None
40
41 self.nout = nout
42 if nout is not None:
43 self.proj = nn.Linear(hidden_size, nout)
44 self.initialize(self.proj.weight)
45 self.decoder = nn.Linear(nout, len(dictionary))
46 else:
47 self.proj = None
48 self.decoder = nn.Linear(hidden_size, len(dictionary))
49
50 self.init_weights()
51
52 # auto-spawn on GPU if available
53 if torch.cuda.is_available():
54 self.cuda()
55
56 def init_weights(self):
57 initrange = 0.1
58 self.encoder.weight.data.uniform_(-initrange, initrange)
59 self.decoder.bias.data.fill_(0)
60 self.decoder.weight.data.uniform_(-initrange, initrange)
61
62 def set_hidden(self, hidden):
63 self.hidden = hidden
64
65 def forward(self, input, hidden, ordered_sequence_lengths=None):
66 encoded = self.encoder(input)
67 emb = self.drop(encoded)
68
69 self.rnn.flatten_parameters()
70
71 output, hidden = self.rnn(emb, hidden)
72
73 if self.proj is not None:
74 output = self.proj(output)
75
76 output = self.drop(output)
77
78 decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
79
80 return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden
81
82 def init_hidden(self, bsz):
83 weight = next(self.parameters()).data
84 return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),
85 Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))
86
87 def get_representation(self, strings: List[str], detach_from_lm=True):
88
89 sequences_as_char_indices: List[List[int]] = []
90 for string in strings:
91 char_indices = [self.dictionary.get_idx_for_item(char) for char in string]
92 sequences_as_char_indices.append(char_indices)
93
94 batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))
95
96 if torch.cuda.is_available():
97 batch = batch.cuda()
98
99 hidden = self.init_hidden(len(strings))
100 prediction, rnn_output, hidden = self.forward(batch, hidden)
101
102 if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)
103
104 return rnn_output
105
106 def repackage_hidden(self, h):
107 """Wraps hidden states in new Variables, to detach them from their history."""
108 if type(h) == torch.Tensor:
109 return Variable(h.data)
110 else:
111 return tuple(self.repackage_hidden(v) for v in h)
112
113 def initialize(self, matrix):
114 in_, out_ = matrix.size()
115 stdv = math.sqrt(3. / (in_ + out_))
116 matrix.data.uniform_(-stdv, stdv)
117
118 @classmethod
119 def load_language_model(cls, model_file):
120
121 if not torch.cuda.is_available():
122 state = torch.load(model_file, map_location='cpu')
123 else:
124 state = torch.load(model_file)
125
126 model = LanguageModel(state['dictionary'],
127 state['is_forward_lm'],
128 state['hidden_size'],
129 state['nlayers'],
130 state['embedding_size'],
131 state['nout'],
132 state['dropout'])
133 model.load_state_dict(state['state_dict'])
134 model.eval()
135 if torch.cuda.is_available():
136 model.cuda()
137 return model
138
139 def save(self, file):
140 model_state = {
141 'state_dict': self.state_dict(),
142 'dictionary': self.dictionary,
143 'is_forward_lm': self.is_forward_lm,
144 'hidden_size': self.hidden_size,
145 'nlayers': self.nlayers,
146 'embedding_size': self.embedding_size,
147 'nout': self.nout,
148 'dropout': self.dropout
149 }
150 torch.save(model_state, file, pickle_protocol=4)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flair/models/language_model.py b/flair/models/language_model.py
--- a/flair/models/language_model.py
+++ b/flair/models/language_model.py
@@ -2,7 +2,7 @@
import torch
import math
from torch.autograd import Variable
-from typing import Dict, List
+from typing import List
from flair.data import Dictionary
@@ -148,3 +148,24 @@
'dropout': self.dropout
}
torch.save(model_state, file, pickle_protocol=4)
+
+ def generate_text(self, number_of_characters=1000) -> str:
+ characters = []
+
+ idx2item = self.dictionary.idx2item
+
+ # initial hidden state
+ hidden = self.init_hidden(1)
+ input = torch.rand(1, 1).mul(len(idx2item)).long()
+ if torch.cuda.is_available():
+ input = input.cuda()
+
+ for i in range(number_of_characters):
+ prediction, rnn_output, hidden = self.forward(input, hidden)
+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()
+ word_idx = torch.multinomial(word_weights, 1)[0]
+ input.data.fill_(word_idx)
+ word = idx2item[word_idx].decode('UTF-8')
+ characters.append(word)
+
+ return ''.join(characters)
|
{"golden_diff": "diff --git a/flair/models/language_model.py b/flair/models/language_model.py\n--- a/flair/models/language_model.py\n+++ b/flair/models/language_model.py\n@@ -2,7 +2,7 @@\n import torch\n import math\n from torch.autograd import Variable\n-from typing import Dict, List\n+from typing import List\n from flair.data import Dictionary\n \n \n@@ -148,3 +148,24 @@\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n+\n+ def generate_text(self, number_of_characters=1000) -> str:\n+ characters = []\n+\n+ idx2item = self.dictionary.idx2item\n+\n+ # initial hidden state\n+ hidden = self.init_hidden(1)\n+ input = torch.rand(1, 1).mul(len(idx2item)).long()\n+ if torch.cuda.is_available():\n+ input = input.cuda()\n+\n+ for i in range(number_of_characters):\n+ prediction, rnn_output, hidden = self.forward(input, hidden)\n+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n+ word_idx = torch.multinomial(word_weights, 1)[0]\n+ input.data.fill_(word_idx)\n+ word = idx2item[word_idx].decode('UTF-8')\n+ characters.append(word)\n+\n+ return ''.join(characters)\n", "issue": "Generate text with language model\nIs there any script that can generate text from trained language model?\r\nAlso calculating perplexity on testing data would be helpful.\n", "before_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import Dict, List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n 
Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n", "path": "flair/models/language_model.py"}], "after_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n 
self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n\n def generate_text(self, number_of_characters=1000) -> str:\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n input = torch.rand(1, 1).mul(len(idx2item)).long()\n if torch.cuda.is_available():\n input = input.cuda()\n\n for i in range(number_of_characters):\n prediction, rnn_output, hidden = self.forward(input, hidden)\n word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n input.data.fill_(word_idx)\n word = idx2item[word_idx].decode('UTF-8')\n characters.append(word)\n\n return ''.join(characters)\n", "path": "flair/models/language_model.py"}]}
| 1,701 | 309 |
gh_patches_debug_35899
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-598
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keeping the docs current
We're currently manually pushing the docs to https://bids-standard.github.io/pybids/, which is easy to forget, and the docs are currently out-of-date (0.9). It would be good to make an explicit process to update them or set up something automatic like ReadTheDocs.
ReadTheDocs also has the advantage of keeping old versions around, which might be useful for reference as the API changes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 import sys
15 import sphinx_rtd_theme
16
17 sys.path.insert(0, os.path.abspath('../'))
18
19 # -- Project information -----------------------------------------------------
20
21 project = 'PyBIDS'
22 copyright = '2020, Developers of PyBIDS'
23 author = 'Developers of PyBIDS'
24
25 currentdir = os.path.abspath(os.path.dirname(__file__))
26 from bids._version import get_versions
27 __version__ = get_versions()['version']
28 source_version = __version__
29 currentdir = os.path.abspath(os.path.dirname(__file__))
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 'sphinx.ext.autodoc',
38 'sphinx.ext.autosummary',
39 'sphinx.ext.autosectionlabel',
40 'sphinx.ext.intersphinx',
41 'sphinx.ext.coverage',
42 'sphinx.ext.mathjax',
43 'sphinx.ext.viewcode',
44 'sphinx.ext.githubpages',
45 'sphinx.ext.napoleon',
46 'numpydoc',
47 'm2r'
48 ]
49
50 intersphinx_mapping = {
51 'http://docs.python.org/3.5': None,
52 'http://docs.scipy.org/doc/numpy': None,
53 'http://docs.scipy.org/doc/scipy/reference': None,
54 'http://matplotlib.org/': None,
55 'http://scikit-learn.org/0.17': None,
56 'http://nipy.org/nibabel/': None,
57 'http://pandas.pydata.org/pandas-docs/stable/': None,
58 'http://neurosynth.readthedocs.io/en/latest/': None,
59 }
60
61 # If your documentation needs a minimal Sphinx version, state it here.
62 needs_sphinx = '2.2.0'
63
64 # Generate stubs
65 autosummary_generate = True
66 autodoc_default_flags = ['members', 'inherited-members']
67 add_module_names = False
68
69 # Add any paths that contain templates here, relative to this directory.
70 templates_path = ['_templates']
71
72 # The suffix of source filenames.
73 source_suffix = ['.rst', '.md']
74
75 # The master toctree document.
76 master_doc = 'index'
77
78 # List of patterns, relative to source directory, that match files and
79 # directories to ignore when looking for source files.
80 # This pattern also affects html_static_path and html_extra_path.
81 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
82
83 # The short X.Y version.
84 version = ''
85 # The full version, including alpha/beta/rc tags.
86 import bids
87 release = bids.__version__
88
89 # The name of the Pygments (syntax highlighting) style to use.
90 pygments_style = 'sphinx'
91
92 # -- Options for HTML output -------------------------------------------------
93
94 # The theme to use for HTML and HTML Help pages. See the documentation for
95 # a list of builtin themes.
96 #
97 # The theme to use for HTML and HTML Help pages. See the documentation for
98 # a list of builtin themes.
99 html_theme = "sphinx_rtd_theme"
100 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
101
102 # Add any paths that contain custom static files (such as style sheets) here,
103 # relative to this directory. They are copied after the builtin static files,
104 # so a file named "default.css" will overwrite the builtin "default.css".
105 html_static_path = ['_static']
106
107 # If false, no module index is generated.
108 html_domain_indices = False
109
110 # One entry per manual page. List of tuples
111 # (source start file, name, description, authors, manual section).
112 man_pages = [
113 ('index', 'bids', 'bids Documentation',
114 [author], 1)
115 ]
116
117 # Grouping the document tree into Texinfo files. List of tuples
118 # (source start file, target name, title, author,
119 # dir menu entry, description, category)
120 texinfo_documents = [
121 ('index', 'bids', 'bids Documentation',
122 author, 'bids', 'One line description of project.',
123 'Miscellaneous'),
124 ]
125
126 # If false, no module index is generated.
127 texinfo_domain_indices = False
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -12,20 +12,21 @@
#
import os
import sys
+from datetime import date
+
import sphinx_rtd_theme
+import bids
+
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'PyBIDS'
-copyright = '2020, Developers of PyBIDS'
+copyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)
author = 'Developers of PyBIDS'
currentdir = os.path.abspath(os.path.dirname(__file__))
-from bids._version import get_versions
-__version__ = get_versions()['version']
-source_version = __version__
currentdir = os.path.abspath(os.path.dirname(__file__))
# -- General configuration ---------------------------------------------------
@@ -44,20 +45,21 @@
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'numpydoc',
- 'm2r'
]
intersphinx_mapping = {
- 'http://docs.python.org/3.5': None,
- 'http://docs.scipy.org/doc/numpy': None,
- 'http://docs.scipy.org/doc/scipy/reference': None,
- 'http://matplotlib.org/': None,
- 'http://scikit-learn.org/0.17': None,
- 'http://nipy.org/nibabel/': None,
- 'http://pandas.pydata.org/pandas-docs/stable/': None,
- 'http://neurosynth.readthedocs.io/en/latest/': None,
+ 'https://docs.python.org/3.5': None,
+ 'https://docs.scipy.org/doc/numpy': None,
+ 'https://docs.scipy.org/doc/scipy/reference': None,
+ 'https://matplotlib.org/': None,
+ 'https://scikit-learn.org/0.17': None,
+ 'https://nipy.org/nibabel/': None,
+ 'https://pandas.pydata.org/pandas-docs/stable/': None,
+ 'https://neurosynth.readthedocs.io/en/latest/': None,
}
+intersphinx_timeout = 5
+
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.2.0'
@@ -81,10 +83,9 @@
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The short X.Y version.
-version = ''
+version = bids.__version__
# The full version, including alpha/beta/rc tags.
-import bids
-release = bids.__version__
+release = version
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
|
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -12,20 +12,21 @@\n #\n import os\n import sys\n+from datetime import date\n+\n import sphinx_rtd_theme\n \n+import bids\n+\n sys.path.insert(0, os.path.abspath('../'))\n \n # -- Project information -----------------------------------------------------\n \n project = 'PyBIDS'\n-copyright = '2020, Developers of PyBIDS'\n+copyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)\n author = 'Developers of PyBIDS'\n \n currentdir = os.path.abspath(os.path.dirname(__file__))\n-from bids._version import get_versions\n-__version__ = get_versions()['version']\n-source_version = __version__\n currentdir = os.path.abspath(os.path.dirname(__file__))\n \n # -- General configuration ---------------------------------------------------\n@@ -44,20 +45,21 @@\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n- 'm2r'\n ]\n \n intersphinx_mapping = {\n- 'http://docs.python.org/3.5': None,\n- 'http://docs.scipy.org/doc/numpy': None,\n- 'http://docs.scipy.org/doc/scipy/reference': None,\n- 'http://matplotlib.org/': None,\n- 'http://scikit-learn.org/0.17': None,\n- 'http://nipy.org/nibabel/': None,\n- 'http://pandas.pydata.org/pandas-docs/stable/': None,\n- 'http://neurosynth.readthedocs.io/en/latest/': None,\n+ 'https://docs.python.org/3.5': None,\n+ 'https://docs.scipy.org/doc/numpy': None,\n+ 'https://docs.scipy.org/doc/scipy/reference': None,\n+ 'https://matplotlib.org/': None,\n+ 'https://scikit-learn.org/0.17': None,\n+ 'https://nipy.org/nibabel/': None,\n+ 'https://pandas.pydata.org/pandas-docs/stable/': None,\n+ 'https://neurosynth.readthedocs.io/en/latest/': None,\n }\n \n+intersphinx_timeout = 5\n+\n # If your documentation needs a minimal Sphinx version, state it here.\n needs_sphinx = '2.2.0'\n \n@@ -81,10 +83,9 @@\n exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n \n # The short X.Y version.\n-version = ''\n+version = bids.__version__\n # The full version, including alpha/beta/rc tags.\n-import bids\n-release = bids.__version__\n+release = version\n \n # The name of the Pygments (syntax highlighting) style to use.\n pygments_style = 'sphinx'\n", "issue": "Keeping the docs current\nWe're currently manually pushing the docs to https://bids-standard.github.io/pybids/, which is easy to forget, and the docs are currently out-of-date (0.9). It would be good to make an explicit process to update them or set up something automatic like ReadTheDocs.\r\n\r\nReadTheDocs also has the advantage of keeping old versions around, which might be useful for reference as the API changes.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport sphinx_rtd_theme\n\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'PyBIDS'\ncopyright = '2020, Developers of PyBIDS'\nauthor = 'Developers of PyBIDS'\n\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\nfrom bids._version import get_versions\n__version__ = get_versions()['version']\nsource_version = __version__\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n 'm2r'\n]\n\nintersphinx_mapping = {\n 'http://docs.python.org/3.5': None,\n 'http://docs.scipy.org/doc/numpy': None,\n 'http://docs.scipy.org/doc/scipy/reference': None,\n 'http://matplotlib.org/': None,\n 'http://scikit-learn.org/0.17': None,\n 'http://nipy.org/nibabel/': None,\n 'http://pandas.pydata.org/pandas-docs/stable/': None,\n 'http://neurosynth.readthedocs.io/en/latest/': None,\n}\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '2.2.0'\n\n# Generate stubs\nautosummary_generate = True\nautodoc_default_flags = ['members', 'inherited-members']\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The short X.Y version.\nversion = ''\n# The full version, including alpha/beta/rc tags.\nimport bids\nrelease = bids.__version__\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bids', 'bids Documentation',\n [author], 1)\n]\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'bids', 'bids Documentation',\n author, 'bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# If false, no module index is generated.\ntexinfo_domain_indices = False\n", "path": "doc/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_rtd_theme\n\nimport bids\n\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'PyBIDS'\ncopyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)\nauthor = 'Developers of PyBIDS'\n\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n]\n\nintersphinx_mapping = {\n 'https://docs.python.org/3.5': None,\n 'https://docs.scipy.org/doc/numpy': None,\n 'https://docs.scipy.org/doc/scipy/reference': None,\n 'https://matplotlib.org/': None,\n 'https://scikit-learn.org/0.17': None,\n 'https://nipy.org/nibabel/': None,\n 'https://pandas.pydata.org/pandas-docs/stable/': None,\n 'https://neurosynth.readthedocs.io/en/latest/': None,\n}\n\nintersphinx_timeout = 5\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '2.2.0'\n\n# Generate stubs\nautosummary_generate = True\nautodoc_default_flags = ['members', 'inherited-members']\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The short X.Y version.\nversion = bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bids', 'bids Documentation',\n [author], 1)\n]\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'bids', 'bids Documentation',\n author, 'bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# If false, no module index is generated.\ntexinfo_domain_indices = False\n", "path": "doc/conf.py"}]}
| 1,641 | 634 |
gh_patches_debug_25600
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-1566
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suggestion: pcreate should list the template as part of basic usage
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/scripts/pcreate.py`
Content:
```
1 # (c) 2005 Ian Bicking and contributors; written for Paste
2 # (http://pythonpaste.org) Licensed under the MIT license:
3 # http://www.opensource.org/licenses/mit-license.php
4
5 import optparse
6 import os
7 import os.path
8 import pkg_resources
9 import re
10 import sys
11
12 _bad_chars_re = re.compile('[^a-zA-Z0-9_]')
13
14 def main(argv=sys.argv, quiet=False):
15 command = PCreateCommand(argv, quiet)
16 return command.run()
17
18 class PCreateCommand(object):
19 verbosity = 1 # required
20 description = "Render Pyramid scaffolding to an output directory"
21 usage = "usage: %prog [options] output_directory"
22 parser = optparse.OptionParser(usage, description=description)
23 parser.add_option('-s', '--scaffold',
24 dest='scaffold_name',
25 action='append',
26 help=("Add a scaffold to the create process "
27 "(multiple -s args accepted)"))
28 parser.add_option('-t', '--template',
29 dest='scaffold_name',
30 action='append',
31 help=('A backwards compatibility alias for '
32 '-s/--scaffold. Add a scaffold to the '
33 'create process (multiple -t args accepted)'))
34 parser.add_option('-l', '--list',
35 dest='list',
36 action='store_true',
37 help="List all available scaffold names")
38 parser.add_option('--list-templates',
39 dest='list',
40 action='store_true',
41 help=("A backwards compatibility alias for -l/--list. "
42 "List all available scaffold names."))
43 parser.add_option('--simulate',
44 dest='simulate',
45 action='store_true',
46 help='Simulate but do no work')
47 parser.add_option('--overwrite',
48 dest='overwrite',
49 action='store_true',
50 help='Always overwrite')
51 parser.add_option('--interactive',
52 dest='interactive',
53 action='store_true',
54 help='When a file would be overwritten, interrogate')
55
56 pyramid_dist = pkg_resources.get_distribution("pyramid")
57
58 def __init__(self, argv, quiet=False):
59 self.quiet = quiet
60 self.options, self.args = self.parser.parse_args(argv[1:])
61 self.scaffolds = self.all_scaffolds()
62
63 def run(self):
64 if self.options.list:
65 return self.show_scaffolds()
66 if not self.options.scaffold_name:
67 self.out('You must provide at least one scaffold name')
68 return 2
69 if not self.args:
70 self.out('You must provide a project name')
71 return 2
72 available = [x.name for x in self.scaffolds]
73 diff = set(self.options.scaffold_name).difference(available)
74 if diff:
75 self.out('Unavailable scaffolds: %s' % list(diff))
76 return 2
77 return self.render_scaffolds()
78
79 def render_scaffolds(self):
80 options = self.options
81 args = self.args
82 output_dir = os.path.abspath(os.path.normpath(args[0]))
83 project_name = os.path.basename(os.path.split(output_dir)[1])
84 pkg_name = _bad_chars_re.sub(
85 '', project_name.lower().replace('-', '_'))
86 safe_name = pkg_resources.safe_name(project_name)
87 egg_name = pkg_resources.to_filename(safe_name)
88
89 # get pyramid package version
90 pyramid_version = self.pyramid_dist.version
91
92 ## map pyramid package version of the documentation branch ##
93 # if version ends with 'dev' then docs version is 'master'
94 if self.pyramid_dist.version[-3:] == 'dev':
95 pyramid_docs_branch = 'master'
96 else:
97 # if not version is not 'dev' find the version.major_version string
98 # and combine it with '-branch'
99 version_match = re.match(r'(\d+\.\d+)', self.pyramid_dist.version)
100 if version_match is not None:
101 pyramid_docs_branch = "%s-branch" % version_match.group()
102 # if can not parse the version then default to 'latest'
103 else:
104 pyramid_docs_branch = 'latest'
105
106 vars = {
107 'project': project_name,
108 'package': pkg_name,
109 'egg': egg_name,
110 'pyramid_version': pyramid_version,
111 'pyramid_docs_branch': pyramid_docs_branch,
112 }
113 for scaffold_name in options.scaffold_name:
114 for scaffold in self.scaffolds:
115 if scaffold.name == scaffold_name:
116 scaffold.run(self, output_dir, vars)
117 return 0
118
119 def show_scaffolds(self):
120 scaffolds = sorted(self.scaffolds, key=lambda x: x.name)
121 if scaffolds:
122 max_name = max([len(t.name) for t in scaffolds])
123 self.out('Available scaffolds:')
124 for scaffold in scaffolds:
125 self.out(' %s:%s %s' % (
126 scaffold.name,
127 ' '*(max_name-len(scaffold.name)), scaffold.summary))
128 else:
129 self.out('No scaffolds available')
130 return 0
131
132 def all_scaffolds(self):
133 scaffolds = []
134 eps = list(pkg_resources.iter_entry_points('pyramid.scaffold'))
135 for entry in eps:
136 try:
137 scaffold_class = entry.load()
138 scaffold = scaffold_class(entry.name)
139 scaffolds.append(scaffold)
140 except Exception as e: # pragma: no cover
141 self.out('Warning: could not load entry point %s (%s: %s)' % (
142 entry.name, e.__class__.__name__, e))
143 return scaffolds
144
145 def out(self, msg): # pragma: no cover
146 if not self.quiet:
147 print(msg)
148
149 if __name__ == '__main__': # pragma: no cover
150 sys.exit(main() or 0)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyramid/scripts/pcreate.py b/pyramid/scripts/pcreate.py
--- a/pyramid/scripts/pcreate.py
+++ b/pyramid/scripts/pcreate.py
@@ -18,7 +18,7 @@
class PCreateCommand(object):
verbosity = 1 # required
description = "Render Pyramid scaffolding to an output directory"
- usage = "usage: %prog [options] output_directory"
+ usage = "usage: %prog [options] -s <scaffold> output_directory"
parser = optparse.OptionParser(usage, description=description)
parser.add_option('-s', '--scaffold',
dest='scaffold_name',
@@ -63,8 +63,16 @@
def run(self):
if self.options.list:
return self.show_scaffolds()
+ if not self.options.scaffold_name and not self.args:
+ if not self.quiet: # pragma: no cover
+ self.parser.print_help()
+ self.out('')
+ self.show_scaffolds()
+ return 2
if not self.options.scaffold_name:
- self.out('You must provide at least one scaffold name')
+ self.out('You must provide at least one scaffold name: -s <scaffold name>')
+ self.out('')
+ self.show_scaffolds()
return 2
if not self.args:
self.out('You must provide a project name')
|
{"golden_diff": "diff --git a/pyramid/scripts/pcreate.py b/pyramid/scripts/pcreate.py\n--- a/pyramid/scripts/pcreate.py\n+++ b/pyramid/scripts/pcreate.py\n@@ -18,7 +18,7 @@\n class PCreateCommand(object):\n verbosity = 1 # required\n description = \"Render Pyramid scaffolding to an output directory\"\n- usage = \"usage: %prog [options] output_directory\"\n+ usage = \"usage: %prog [options] -s <scaffold> output_directory\"\n parser = optparse.OptionParser(usage, description=description)\n parser.add_option('-s', '--scaffold',\n dest='scaffold_name',\n@@ -63,8 +63,16 @@\n def run(self):\n if self.options.list:\n return self.show_scaffolds()\n+ if not self.options.scaffold_name and not self.args:\n+ if not self.quiet: # pragma: no cover\n+ self.parser.print_help()\n+ self.out('')\n+ self.show_scaffolds()\n+ return 2\n if not self.options.scaffold_name:\n- self.out('You must provide at least one scaffold name')\n+ self.out('You must provide at least one scaffold name: -s <scaffold name>')\n+ self.out('')\n+ self.show_scaffolds()\n return 2\n if not self.args:\n self.out('You must provide a project name')\n", "issue": "Suggestion: pcreate should list the template part of basic usage\n\n", "before_files": [{"content": "# (c) 2005 Ian Bicking and contributors; written for Paste\n# (http://pythonpaste.org) Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n\nimport optparse\nimport os\nimport os.path\nimport pkg_resources\nimport re\nimport sys\n\n_bad_chars_re = re.compile('[^a-zA-Z0-9_]')\n\ndef main(argv=sys.argv, quiet=False):\n command = PCreateCommand(argv, quiet)\n return command.run()\n\nclass PCreateCommand(object):\n verbosity = 1 # required\n description = \"Render Pyramid scaffolding to an output directory\"\n usage = \"usage: %prog [options] output_directory\"\n parser = optparse.OptionParser(usage, description=description)\n parser.add_option('-s', '--scaffold',\n dest='scaffold_name',\n action='append',\n help=(\"Add a scaffold to the create process \"\n \"(multiple -s args accepted)\"))\n parser.add_option('-t', '--template',\n dest='scaffold_name',\n action='append',\n help=('A backwards compatibility alias for '\n '-s/--scaffold. Add a scaffold to the '\n 'create process (multiple -t args accepted)'))\n parser.add_option('-l', '--list',\n dest='list',\n action='store_true',\n help=\"List all available scaffold names\")\n parser.add_option('--list-templates',\n dest='list',\n action='store_true',\n help=(\"A backwards compatibility alias for -l/--list. 
\"\n \"List all available scaffold names.\"))\n parser.add_option('--simulate',\n dest='simulate',\n action='store_true',\n help='Simulate but do no work')\n parser.add_option('--overwrite',\n dest='overwrite',\n action='store_true',\n help='Always overwrite')\n parser.add_option('--interactive',\n dest='interactive',\n action='store_true',\n help='When a file would be overwritten, interrogate')\n\n pyramid_dist = pkg_resources.get_distribution(\"pyramid\")\n\n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n self.options, self.args = self.parser.parse_args(argv[1:])\n self.scaffolds = self.all_scaffolds()\n\n def run(self):\n if self.options.list:\n return self.show_scaffolds()\n if not self.options.scaffold_name:\n self.out('You must provide at least one scaffold name')\n return 2\n if not self.args:\n self.out('You must provide a project name')\n return 2\n available = [x.name for x in self.scaffolds]\n diff = set(self.options.scaffold_name).difference(available)\n if diff:\n self.out('Unavailable scaffolds: %s' % list(diff))\n return 2\n return self.render_scaffolds()\n\n def render_scaffolds(self):\n options = self.options\n args = self.args\n output_dir = os.path.abspath(os.path.normpath(args[0]))\n project_name = os.path.basename(os.path.split(output_dir)[1])\n pkg_name = _bad_chars_re.sub(\n '', project_name.lower().replace('-', '_'))\n safe_name = pkg_resources.safe_name(project_name)\n egg_name = pkg_resources.to_filename(safe_name)\n\n # get pyramid package version\n pyramid_version = self.pyramid_dist.version\n\n ## map pyramid package version of the documentation branch ##\n # if version ends with 'dev' then docs version is 'master'\n if self.pyramid_dist.version[-3:] == 'dev':\n pyramid_docs_branch = 'master'\n else:\n # if not version is not 'dev' find the version.major_version string\n # and combine it with '-branch'\n version_match = re.match(r'(\\d+\\.\\d+)', self.pyramid_dist.version)\n if version_match is not None:\n pyramid_docs_branch = \"%s-branch\" % version_match.group()\n # if can not parse the version then default to 'latest'\n else:\n pyramid_docs_branch = 'latest'\n\n vars = {\n 'project': project_name,\n 'package': pkg_name,\n 'egg': egg_name,\n 'pyramid_version': pyramid_version,\n 'pyramid_docs_branch': pyramid_docs_branch,\n }\n for scaffold_name in options.scaffold_name:\n for scaffold in self.scaffolds:\n if scaffold.name == scaffold_name:\n scaffold.run(self, output_dir, vars)\n return 0\n\n def show_scaffolds(self):\n scaffolds = sorted(self.scaffolds, key=lambda x: x.name)\n if scaffolds:\n max_name = max([len(t.name) for t in scaffolds])\n self.out('Available scaffolds:')\n for scaffold in scaffolds:\n self.out(' %s:%s %s' % (\n scaffold.name,\n ' '*(max_name-len(scaffold.name)), scaffold.summary))\n else:\n self.out('No scaffolds available')\n return 0\n\n def all_scaffolds(self):\n scaffolds = []\n eps = list(pkg_resources.iter_entry_points('pyramid.scaffold'))\n for entry in eps:\n try:\n scaffold_class = entry.load()\n scaffold = scaffold_class(entry.name)\n scaffolds.append(scaffold)\n except Exception as e: # pragma: no cover\n self.out('Warning: could not load entry point %s (%s: %s)' % (\n entry.name, e.__class__.__name__, e))\n return scaffolds\n\n def out(self, msg): # pragma: no cover\n if not self.quiet:\n print(msg)\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n", "path": "pyramid/scripts/pcreate.py"}], "after_files": [{"content": "# (c) 2005 Ian Bicking and contributors; written for Paste\n# 
(http://pythonpaste.org) Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n\nimport optparse\nimport os\nimport os.path\nimport pkg_resources\nimport re\nimport sys\n\n_bad_chars_re = re.compile('[^a-zA-Z0-9_]')\n\ndef main(argv=sys.argv, quiet=False):\n command = PCreateCommand(argv, quiet)\n return command.run()\n\nclass PCreateCommand(object):\n verbosity = 1 # required\n description = \"Render Pyramid scaffolding to an output directory\"\n usage = \"usage: %prog [options] -s <scaffold> output_directory\"\n parser = optparse.OptionParser(usage, description=description)\n parser.add_option('-s', '--scaffold',\n dest='scaffold_name',\n action='append',\n help=(\"Add a scaffold to the create process \"\n \"(multiple -s args accepted)\"))\n parser.add_option('-t', '--template',\n dest='scaffold_name',\n action='append',\n help=('A backwards compatibility alias for '\n '-s/--scaffold. Add a scaffold to the '\n 'create process (multiple -t args accepted)'))\n parser.add_option('-l', '--list',\n dest='list',\n action='store_true',\n help=\"List all available scaffold names\")\n parser.add_option('--list-templates',\n dest='list',\n action='store_true',\n help=(\"A backwards compatibility alias for -l/--list. \"\n \"List all available scaffold names.\"))\n parser.add_option('--simulate',\n dest='simulate',\n action='store_true',\n help='Simulate but do no work')\n parser.add_option('--overwrite',\n dest='overwrite',\n action='store_true',\n help='Always overwrite')\n parser.add_option('--interactive',\n dest='interactive',\n action='store_true',\n help='When a file would be overwritten, interrogate')\n\n pyramid_dist = pkg_resources.get_distribution(\"pyramid\")\n\n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n self.options, self.args = self.parser.parse_args(argv[1:])\n self.scaffolds = self.all_scaffolds()\n\n def run(self):\n if self.options.list:\n return self.show_scaffolds()\n if not self.options.scaffold_name and not self.args:\n if not self.quiet: # pragma: no cover\n self.parser.print_help()\n self.out('')\n self.show_scaffolds()\n return 2\n if not self.options.scaffold_name:\n self.out('You must provide at least one scaffold name: -s <scaffold name>')\n self.out('')\n self.show_scaffolds()\n return 2\n if not self.args:\n self.out('You must provide a project name')\n return 2\n available = [x.name for x in self.scaffolds]\n diff = set(self.options.scaffold_name).difference(available)\n if diff:\n self.out('Unavailable scaffolds: %s' % list(diff))\n return 2\n return self.render_scaffolds()\n\n def render_scaffolds(self):\n options = self.options\n args = self.args\n output_dir = os.path.abspath(os.path.normpath(args[0]))\n project_name = os.path.basename(os.path.split(output_dir)[1])\n pkg_name = _bad_chars_re.sub(\n '', project_name.lower().replace('-', '_'))\n safe_name = pkg_resources.safe_name(project_name)\n egg_name = pkg_resources.to_filename(safe_name)\n\n # get pyramid package version\n pyramid_version = self.pyramid_dist.version\n\n ## map pyramid package version of the documentation branch ##\n # if version ends with 'dev' then docs version is 'master'\n if self.pyramid_dist.version[-3:] == 'dev':\n pyramid_docs_branch = 'master'\n else:\n # if not version is not 'dev' find the version.major_version string\n # and combine it with '-branch'\n version_match = re.match(r'(\\d+\\.\\d+)', self.pyramid_dist.version)\n if version_match is not None:\n pyramid_docs_branch = \"%s-branch\" % version_match.group()\n # if can not 
parse the version then default to 'latest'\n else:\n pyramid_docs_branch = 'latest'\n\n vars = {\n 'project': project_name,\n 'package': pkg_name,\n 'egg': egg_name,\n 'pyramid_version': pyramid_version,\n 'pyramid_docs_branch': pyramid_docs_branch,\n }\n for scaffold_name in options.scaffold_name:\n for scaffold in self.scaffolds:\n if scaffold.name == scaffold_name:\n scaffold.run(self, output_dir, vars)\n return 0\n\n def show_scaffolds(self):\n scaffolds = sorted(self.scaffolds, key=lambda x: x.name)\n if scaffolds:\n max_name = max([len(t.name) for t in scaffolds])\n self.out('Available scaffolds:')\n for scaffold in scaffolds:\n self.out(' %s:%s %s' % (\n scaffold.name,\n ' '*(max_name-len(scaffold.name)), scaffold.summary))\n else:\n self.out('No scaffolds available')\n return 0\n\n def all_scaffolds(self):\n scaffolds = []\n eps = list(pkg_resources.iter_entry_points('pyramid.scaffold'))\n for entry in eps:\n try:\n scaffold_class = entry.load()\n scaffold = scaffold_class(entry.name)\n scaffolds.append(scaffold)\n except Exception as e: # pragma: no cover\n self.out('Warning: could not load entry point %s (%s: %s)' % (\n entry.name, e.__class__.__name__, e))\n return scaffolds\n\n def out(self, msg): # pragma: no cover\n if not self.quiet:\n print(msg)\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n", "path": "pyramid/scripts/pcreate.py"}]}
| 1,852 | 308 |
gh_patches_debug_38306 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-825 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changes to Campbelltown Council URLs and APIs have broken the ability to retrieve waste collection info (campbelltown_nsw_gov_au.py)
Hi there,
I'm absolutely useless with Python or I'd be coding a fix and lodging a pull request, but instead am posting this as an issue at the community's mercy :)
The Campbelltown Council (Australia) API URLs have changed, and I believe that they're now presenting address search data a little differently too - XML instead of JSON.
The overall pattern still appears to be the same:
1. Retrieve the GeolocationID for a given address via the 'address_search' API
2. Plug that GeolocationID into the 'collection' API to retrieve the waste collection info in HTML format, wrapped in JSON
**URLs in existing version of Campbelltown NSW AU Waste Collection Source:**
```
API_URLS = {
"address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
"collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
}
```
**URLs that work now:**
```
API_URLS = {
"address_search": "https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}",
"collection": "https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
}
```
I'm not familiar with the old Campbelltown address search API, but looking at the code it appears that it used to return JSON data. It's now returning pure XML.
**Examples using the new API URLs:**
Address search result: [https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566](https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566)
Collection API result for the above GeolocationID: [https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU](https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py`
Content:
```
1 import datetime
2 import json
3
4 import requests
5 from bs4 import BeautifulSoup
6 from requests.utils import requote_uri
7 from waste_collection_schedule import Collection
8
9 TITLE = "Campbelltown City Council"
10 DESCRIPTION = "Source for Campbelltown City Council rubbish collection."
11 URL = "https://www.campbelltown.nsw.gov.au/"
12 TEST_CASES = {
13 "Minto Mall": {
14 "post_code": "2566",
15 "suburb": "Minto",
16 "street_name": "Brookfield Road",
17 "street_number": "10",
18 },
19 "Campbelltown Catholic Club": {
20 "post_code": "2560",
21 "suburb": "Campbelltown",
22 "street_name": "Camden Road",
23 "street_number": "20-22",
24 },
25 "Australia Post Ingleburn": {
26 "post_code": "2565",
27 "suburb": "INGLEBURN",
28 "street_name": "Oxford Road",
29 "street_number": "34",
30 },
31 }
32
33 API_URLS = {
34 "address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
35 "collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
36 }
37
38 HEADERS = {"user-agent": "Mozilla/5.0"}
39
40 ICON_MAP = {
41 "General Waste": "trash-can",
42 "Recycling": "mdi:recycle",
43 "Green Waste": "mdi:leaf",
44 }
45
46
47 class Source:
48 def __init__(
49 self, post_code: str, suburb: str, street_name: str, street_number: str
50 ):
51 self.post_code = post_code
52 self.suburb = suburb
53 self.street_name = street_name
54 self.street_number = street_number
55
56 def fetch(self):
57 locationId = 0
58
59 address = "{} {} {} NSW {}".format(
60 self.street_number, self.street_name, self.suburb, self.post_code
61 )
62
63 q = requote_uri(str(API_URLS["address_search"]).format(address))
64
65 # Retrieve suburbs
66 r = requests.get(q, headers=HEADERS)
67
68 data = json.loads(r.text)
69
70 # Find the ID for our suburb
71 for item in data["locations"]:
72 locationId = item["Id"]
73 break
74
75 if locationId == 0:
76 return []
77
78 # Retrieve the upcoming collections for our property
79 q = requote_uri(str(API_URLS["collection"]).format(locationId))
80
81 r = requests.get(q, headers=HEADERS)
82
83 data = json.loads(r.text)
84
85 responseContent = data["responseContent"]
86
87 soup = BeautifulSoup(responseContent, "html.parser")
88 services = soup.find_all("div", attrs={"class": "service-details"})
89
90 entries = []
91
92 for item in services:
93 # test if <div> contains a valid date. If not, is is not a collection item.
94 date_text = item.find("span")
95 try:
96 date = datetime.datetime.strptime(date_text.text, "%A%d %b %Y").date()
97
98 except ValueError:
99 continue
100
101 waste_type = item.contents[0].strip()
102
103 entries.append(
104 Collection(
105 date=date,
106 t=waste_type,
107 icon=ICON_MAP.get(waste_type, "mdi:trash-can"),
108 )
109 )
110
111 return entries
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
@@ -31,8 +31,8 @@
}
API_URLS = {
- "address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
- "collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
+ "address_search": "https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}",
+ "collection": "https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
}
HEADERS = {"user-agent": "Mozilla/5.0"}
@@ -43,7 +43,6 @@
"Green Waste": "mdi:leaf",
}
-
class Source:
def __init__(
self, post_code: str, suburb: str, street_name: str, street_number: str
@@ -68,7 +67,7 @@
data = json.loads(r.text)
# Find the ID for our suburb
- for item in data["locations"]:
+ for item in data["Items"]:
locationId = item["Id"]
break
@@ -85,20 +84,28 @@
responseContent = data["responseContent"]
soup = BeautifulSoup(responseContent, "html.parser")
- services = soup.find_all("div", attrs={"class": "service-details"})
+ services = soup.find_all("div", attrs={"class": "waste-services-result"})
entries = []
for item in services:
# test if <div> contains a valid date. If not, is is not a collection item.
- date_text = item.find("span")
+ date_text = item.find("div", attrs={"class": "next-service"})
+
+ # The date format currently used on https://www.campbelltown.nsw.gov.au/Services-and-Facilities/Waste-and-Recycling/Check-my-collection-day
+ date_format = '%a %d/%m/%Y'
+
try:
- date = datetime.datetime.strptime(date_text.text, "%A%d %b %Y").date()
+ # Strip carriage returns and newlines out of the HTML content
+ cleaned_date_text = date_text.text.replace('\r','').replace('\n','').strip()
+
+ # Parse the date
+ date = datetime.datetime.strptime(cleaned_date_text, date_format).date()
except ValueError:
continue
- waste_type = item.contents[0].strip()
+ waste_type = item.find("h3").text.strip()
entries.append(
Collection(
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py\n@@ -31,8 +31,8 @@\n }\n \n API_URLS = {\n- \"address_search\": \"https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}\",\n- \"collection\": \"https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}\",\n+ \"address_search\": \"https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n+ \"collection\": \"https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n }\n \n HEADERS = {\"user-agent\": \"Mozilla/5.0\"}\n@@ -43,7 +43,6 @@\n \"Green Waste\": \"mdi:leaf\",\n }\n \n-\n class Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n@@ -68,7 +67,7 @@\n data = json.loads(r.text)\n \n # Find the ID for our suburb\n- for item in data[\"locations\"]:\n+ for item in data[\"Items\"]:\n locationId = item[\"Id\"]\n break\n \n@@ -85,20 +84,28 @@\n responseContent = data[\"responseContent\"]\n \n soup = BeautifulSoup(responseContent, \"html.parser\")\n- services = soup.find_all(\"div\", attrs={\"class\": \"service-details\"})\n+ services = soup.find_all(\"div\", attrs={\"class\": \"waste-services-result\"})\n \n entries = []\n \n for item in services:\n # test if <div> contains a valid date. If not, is is not a collection item.\n- date_text = item.find(\"span\")\n+ date_text = item.find(\"div\", attrs={\"class\": \"next-service\"})\n+ \n+ # The date format currently used on https://www.campbelltown.nsw.gov.au/Services-and-Facilities/Waste-and-Recycling/Check-my-collection-day\n+ date_format = '%a %d/%m/%Y'\n+\n try:\n- date = datetime.datetime.strptime(date_text.text, \"%A%d %b %Y\").date()\n+ # Strip carriage returns and newlines out of the HTML content\n+ cleaned_date_text = date_text.text.replace('\\r','').replace('\\n','').strip()\n+\n+ # Parse the date\n+ date = datetime.datetime.strptime(cleaned_date_text, date_format).date()\n \n except ValueError:\n continue\n \n- waste_type = item.contents[0].strip()\n+ waste_type = item.find(\"h3\").text.strip()\n \n entries.append(\n Collection(\n", "issue": "Changes to Campbelltown Council URLs and APIs have broken the ability to retrieve waste collection info (campbelltown_nsw_gov_au.py)\nHi there,\r\n\r\nI'm absolutely useless with Python or I'd be coding a fix and lodging a pull request, but instead am posting this as an issue at the community's mercy :)\r\n\r\nThe Campbelltown Council (Australia) API URLs have changed, and I believe that they're now presenting address search data a little differently too - XML instead of JSON.\r\n\r\nThe overall pattern still appears to be the same:\r\n\r\n1. Retrieve the GeolocationID for a given address via the 'address_search' API\r\n2. 
Plug that GeolocationID into the 'collection' API to retrieve the waste collection info in HTML format, wrapped in JSON\r\n\r\n**URLs in existing version of Campbelltown NSW AU Waste Collection Source:**\r\n```\r\nAPI_URLS = {\r\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}\",\r\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}\",\r\n}\r\n```\r\n\r\n**URLs that work now:**\r\n```\r\nAPI_URLS = {\r\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}\",\r\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\r\n}\r\n```\r\n\r\nI'm not familiar with the old Campbelltown address search API, but looking at the code it appears that it used to return JSON data. It's now returning pure XML. \r\n\r\n**Examples using the new API URLs:**\r\nAddress search result: [https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566](https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566)\r\n\r\nCollection API result for the above GeolocationID: [https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU](https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU)\n", "before_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Campbelltown City Council\"\nDESCRIPTION = \"Source for Campbelltown City Council rubbish collection.\"\nURL = \"https://www.campbelltown.nsw.gov.au/\"\nTEST_CASES = {\n \"Minto Mall\": {\n \"post_code\": \"2566\",\n \"suburb\": \"Minto\",\n \"street_name\": \"Brookfield Road\",\n \"street_number\": \"10\",\n },\n \"Campbelltown Catholic Club\": {\n \"post_code\": \"2560\",\n \"suburb\": \"Campbelltown\",\n \"street_name\": \"Camden Road\",\n \"street_number\": \"20-22\",\n },\n \"Australia Post Ingleburn\": {\n \"post_code\": \"2565\",\n \"suburb\": \"INGLEBURN\",\n \"street_name\": \"Oxford Road\",\n \"street_number\": \"34\",\n },\n}\n\nAPI_URLS = {\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}\",\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}\",\n}\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0\"}\n\nICON_MAP = {\n \"General Waste\": \"trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Green Waste\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n locationId = 0\n\n address = \"{} {} {} NSW {}\".format(\n self.street_number, self.street_name, self.suburb, self.post_code\n )\n\n q = requote_uri(str(API_URLS[\"address_search\"]).format(address))\n\n # Retrieve suburbs\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n # Find the ID for our suburb\n for item in data[\"locations\"]:\n locationId = item[\"Id\"]\n break\n\n if locationId == 0:\n return []\n\n # Retrieve the 
upcoming collections for our property\n q = requote_uri(str(API_URLS[\"collection\"]).format(locationId))\n\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"div\", attrs={\"class\": \"service-details\"})\n\n entries = []\n\n for item in services:\n # test if <div> contains a valid date. If not, is is not a collection item.\n date_text = item.find(\"span\")\n try:\n date = datetime.datetime.strptime(date_text.text, \"%A%d %b %Y\").date()\n\n except ValueError:\n continue\n\n waste_type = item.contents[0].strip()\n\n entries.append(\n Collection(\n date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type, \"mdi:trash-can\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py"}], "after_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Campbelltown City Council\"\nDESCRIPTION = \"Source for Campbelltown City Council rubbish collection.\"\nURL = \"https://www.campbelltown.nsw.gov.au/\"\nTEST_CASES = {\n \"Minto Mall\": {\n \"post_code\": \"2566\",\n \"suburb\": \"Minto\",\n \"street_name\": \"Brookfield Road\",\n \"street_number\": \"10\",\n },\n \"Campbelltown Catholic Club\": {\n \"post_code\": \"2560\",\n \"suburb\": \"Campbelltown\",\n \"street_name\": \"Camden Road\",\n \"street_number\": \"20-22\",\n },\n \"Australia Post Ingleburn\": {\n \"post_code\": \"2565\",\n \"suburb\": \"INGLEBURN\",\n \"street_name\": \"Oxford Road\",\n \"street_number\": \"34\",\n },\n}\n\nAPI_URLS = {\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n}\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0\"}\n\nICON_MAP = {\n \"General Waste\": \"trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Green Waste\": \"mdi:leaf\",\n}\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n locationId = 0\n\n address = \"{} {} {} NSW {}\".format(\n self.street_number, self.street_name, self.suburb, self.post_code\n )\n\n q = requote_uri(str(API_URLS[\"address_search\"]).format(address))\n\n # Retrieve suburbs\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n # Find the ID for our suburb\n for item in data[\"Items\"]:\n locationId = item[\"Id\"]\n break\n\n if locationId == 0:\n return []\n\n # Retrieve the upcoming collections for our property\n q = requote_uri(str(API_URLS[\"collection\"]).format(locationId))\n\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"div\", attrs={\"class\": \"waste-services-result\"})\n\n entries = []\n\n for item in services:\n # test if <div> contains a valid date. 
If not, is is not a collection item.\n date_text = item.find(\"div\", attrs={\"class\": \"next-service\"})\n \n # The date format currently used on https://www.campbelltown.nsw.gov.au/Services-and-Facilities/Waste-and-Recycling/Check-my-collection-day\n date_format = '%a %d/%m/%Y'\n\n try:\n # Strip carriage returns and newlines out of the HTML content\n cleaned_date_text = date_text.text.replace('\\r','').replace('\\n','').strip()\n\n # Parse the date\n date = datetime.datetime.strptime(cleaned_date_text, date_format).date()\n\n except ValueError:\n continue\n\n waste_type = item.find(\"h3\").text.strip()\n\n entries.append(\n Collection(\n date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type, \"mdi:trash-can\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py"}]}
| 1,889 | 699 |
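As an aside on the record above: the two-step lookup the Campbelltown issue describes (resolve an address to a GeolocationID, then fetch the waste-services HTML wrapped in JSON) can be tried outside Home Assistant as a small standalone script. The sketch below follows the endpoints and field names used in the golden diff (`Items`, `Id`, `responseContent`, `waste-services-result`, `next-service`); the address comes from the record's test cases, and the whole thing is illustrative rather than a verified client.

```python
import requests
from bs4 import BeautifulSoup

SEARCH_URL = "https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}"
WASTE_URL = (
    "https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices"
    "?geolocationid={}&ocsvclang=en-AU"
)
HEADERS = {"user-agent": "Mozilla/5.0"}

# Step 1: resolve the street address to a GeolocationID.
address = "10 Brookfield Road Minto NSW 2566"
items = requests.get(SEARCH_URL.format(address), headers=HEADERS).json()["Items"]
location_id = items[0]["Id"]

# Step 2: the collection endpoint returns JSON whose "responseContent" field is HTML.
html = requests.get(WASTE_URL.format(location_id), headers=HEADERS).json()["responseContent"]
soup = BeautifulSoup(html, "html.parser")
for result in soup.find_all("div", attrs={"class": "waste-services-result"}):
    waste_type = result.find("h3").text.strip()
    next_service = result.find("div", attrs={"class": "next-service"}).text.strip()
    print(waste_type, "->", next_service)
```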
gh_patches_debug_25514 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1049 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
httplib patch doesn't seem to support a default pin
I'm trying to set up `httplib` patching in my application and I'm struggling to get it to send spans with a `service` set. I am not certain, but I think that the documentation and code for the `httplib` patch in `ddtrace.contrib.httplib` is broken, and doesn't support setting up a default service to send spans as.
Here's my reproduction case:
```python
import logging
logging.basicConfig(level=logging.DEBUG)
from ddtrace import patch
patch(httplib=True)
# Code taken from the ddtrace-py docs for httplib on python 3
from ddtrace import Pin
import http.client
import urllib.request
# Use a Pin to specify metadata for all http requests
Pin.override(http.client, service='httplib')
resp = urllib.request.urlopen('https://www.datadoghq.com/')
```
If I run that locally on the latest master, I get messages like these from the `dd-trace-agent` running on my machine:
```
2018-11-16 14:51:47 ERROR (api.go:249) - dropping trace reason: invalid span (SpanID:11098683504101424356): span.normalize: empty `Service` (debug for more info), [name:"http.client.request" resource:"http.client.request" traceID:2003532794568525569 spanID:11098683504101424356 start:1542397906651150080 duration:274516105 meta:<key:"http.method" value:"GET" > meta:<key:"http.status_code" value:"200" > meta:<key:"http.url" value:"https://www.datadoghq.com/" > meta:<key:"system.pid" value:"29250" > type:"http" ]
```
I did some debugging locally, and it looks like the default Pin added to the `HTTPSConnection` instance in the wrapped constructor (https://github.com/DataDog/dd-trace-py/blob/v0.16.0/ddtrace/contrib/httplib/patch.py#L19) is the one that ends up feeding the span's their service value. The pin added in the example code (as suggested by the docs) isn't having any effect. Unless there's something I'm missing, this seems like it makes sense, because the pin the docs suggest adding is on the `http.client` module, not on the classes inside nor on instances of those classes. How was that supposed to work @palazzem?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/httplib/__init__.py`
Content:
```
1 """
2 Patch the built-in httplib/http.client libraries to trace all HTTP calls.
3
4
5 Usage::
6
7 # Patch all supported modules/functions
8 from ddtrace import patch
9 patch(httplib=True)
10
11 # Python 2
12 from ddtrace import Pin
13 import httplib
14 import urllib
15
16 # Use a Pin to specify metadata for all http requests
17 Pin.override(httplib, service='httplib')
18 resp = urllib.urlopen('http://www.datadog.com/')
19
20 # Python 3
21 from ddtrace import Pin
22 import http.client
23 import urllib.request
24
25 # Use a Pin to specify metadata for all http requests
26 Pin.override(http.client, service='httplib')
27 resp = urllib.request.urlopen('http://www.datadog.com/')
28
29 :ref:`Headers tracing <http-headers-tracing>` is supported for this integration.
30 """
31 from .patch import patch, unpatch
32 __all__ = ['patch', 'unpatch']
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py
--- a/ddtrace/contrib/httplib/__init__.py
+++ b/ddtrace/contrib/httplib/__init__.py
@@ -1,5 +1,5 @@
"""
-Patch the built-in httplib/http.client libraries to trace all HTTP calls.
+Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.
Usage::
@@ -9,23 +9,24 @@
patch(httplib=True)
# Python 2
- from ddtrace import Pin
import httplib
import urllib
- # Use a Pin to specify metadata for all http requests
- Pin.override(httplib, service='httplib')
resp = urllib.urlopen('http://www.datadog.com/')
# Python 3
- from ddtrace import Pin
import http.client
import urllib.request
- # Use a Pin to specify metadata for all http requests
- Pin.override(http.client, service='httplib')
resp = urllib.request.urlopen('http://www.datadog.com/')
+``httplib`` spans do not include a default service name. Before HTTP calls are
+made, ensure a parent span has been started with a service name to be used for
+spans generated from those calls::
+
+ with tracer.trace('main', service='my-httplib-operation'):
+ resp = urllib.request.urlopen('http://www.datadog.com/')
+
:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.
"""
from .patch import patch, unpatch
|
{"golden_diff": "diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py\n--- a/ddtrace/contrib/httplib/__init__.py\n+++ b/ddtrace/contrib/httplib/__init__.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Patch the built-in httplib/http.client libraries to trace all HTTP calls.\n+Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.\n \n \n Usage::\n@@ -9,23 +9,24 @@\n patch(httplib=True)\n \n # Python 2\n- from ddtrace import Pin\n import httplib\n import urllib\n \n- # Use a Pin to specify metadata for all http requests\n- Pin.override(httplib, service='httplib')\n resp = urllib.urlopen('http://www.datadog.com/')\n \n # Python 3\n- from ddtrace import Pin\n import http.client\n import urllib.request\n \n- # Use a Pin to specify metadata for all http requests\n- Pin.override(http.client, service='httplib')\n resp = urllib.request.urlopen('http://www.datadog.com/')\n \n+``httplib`` spans do not include a default service name. Before HTTP calls are\n+made, ensure a parent span has been started with a service name to be used for\n+spans generated from those calls::\n+\n+ with tracer.trace('main', service='my-httplib-operation'):\n+ resp = urllib.request.urlopen('http://www.datadog.com/')\n+\n :ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n \"\"\"\n from .patch import patch, unpatch\n", "issue": "httplib patch doesn't seem to support a default pin\nI'm trying to set up `httplib` patching in my application and I'm struggling to get it to send spans with a `service` set. I am not certain, but I think that the documentation and code for the `httplib` patch in `ddtrace.contrib.httplib` is broken, and doesn't support setting up a default service to send spans as.\r\n\r\nHere's my reproduction case:\r\n\r\n```python\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\nfrom ddtrace import patch\r\npatch(httplib=True)\r\n\r\n# Code taken from the ddtrace-py docs for httplib on python 3\r\nfrom ddtrace import Pin\r\nimport http.client\r\nimport urllib.request\r\n\r\n# Use a Pin to specify metadata for all http requests\r\nPin.override(http.client, service='httplib')\r\nresp = urllib.request.urlopen('https://www.datadoghq.com/')\r\n```\r\n\r\nIf I run that locally on the latest master, I get messages like these from the `dd-trace-agent` running on my machine:\r\n\r\n```\r\n2018-11-16 14:51:47 ERROR (api.go:249) - dropping trace reason: invalid span (SpanID:11098683504101424356): span.normalize: empty `Service` (debug for more info), [name:\"http.client.request\" resource:\"http.client.request\" traceID:2003532794568525569 spanID:11098683504101424356 start:1542397906651150080 duration:274516105 meta:<key:\"http.method\" value:\"GET\" > meta:<key:\"http.status_code\" value:\"200\" > meta:<key:\"http.url\" value:\"https://www.datadoghq.com/\" > meta:<key:\"system.pid\" value:\"29250\" > type:\"http\" ]\r\n```\r\n\r\nI did some debugging locally, and it looks like the default Pin added to the `HTTPSConnection` instance in the wrapped constructor (https://github.com/DataDog/dd-trace-py/blob/v0.16.0/ddtrace/contrib/httplib/patch.py#L19) is the one that ends up feeding the span's their service value. The pin added in the example code (as suggested by the docs) isn't having any effect. Unless there's something I'm missing, this seems like it makes sense, because the pin the docs suggest adding is on the `http.client` module, not on the classes inside nor on instances of those classes. 
How was that supposed to work @palazzem? \n", "before_files": [{"content": "\"\"\"\nPatch the built-in httplib/http.client libraries to trace all HTTP calls.\n\n\nUsage::\n\n # Patch all supported modules/functions\n from ddtrace import patch\n patch(httplib=True)\n\n # Python 2\n from ddtrace import Pin\n import httplib\n import urllib\n\n # Use a Pin to specify metadata for all http requests\n Pin.override(httplib, service='httplib')\n resp = urllib.urlopen('http://www.datadog.com/')\n\n # Python 3\n from ddtrace import Pin\n import http.client\n import urllib.request\n\n # Use a Pin to specify metadata for all http requests\n Pin.override(http.client, service='httplib')\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n\"\"\"\nfrom .patch import patch, unpatch\n__all__ = ['patch', 'unpatch']\n", "path": "ddtrace/contrib/httplib/__init__.py"}], "after_files": [{"content": "\"\"\"\nPatch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.\n\n\nUsage::\n\n # Patch all supported modules/functions\n from ddtrace import patch\n patch(httplib=True)\n\n # Python 2\n import httplib\n import urllib\n\n resp = urllib.urlopen('http://www.datadog.com/')\n\n # Python 3\n import http.client\n import urllib.request\n\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n``httplib`` spans do not include a default service name. Before HTTP calls are\nmade, ensure a parent span has been started with a service name to be used for\nspans generated from those calls::\n\n with tracer.trace('main', service='my-httplib-operation'):\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n\"\"\"\nfrom .patch import patch, unpatch\n__all__ = ['patch', 'unpatch']\n", "path": "ddtrace/contrib/httplib/__init__.py"}]}
| 1,131 | 364 |
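The fix recorded above is documentation-only: `httplib`/`http.client` spans have no default service name, so the docs now tell users to open a parent span that supplies one rather than pinning `http.client`. In practice the recommended pattern looks roughly like the sketch below (the span and service names are placeholders):

```python
from ddtrace import patch, tracer

patch(httplib=True)

import urllib.request

# http.client spans inherit the service name from the active parent span,
# so wrap outgoing calls in one instead of pinning http.client directly.
with tracer.trace("main", service="my-httplib-operation"):
    resp = urllib.request.urlopen("http://www.datadog.com/")
```

With patching enabled, the span for the HTTP call becomes a child of `main` and is reported under `my-httplib-operation`, which is exactly the `Service` value the dropped-trace error in the issue was missing.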
gh_patches_debug_4366 | rasdani/github-patches | git_diff | ephios-dev__ephios-1078 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Notification Mails detected as SPAM
**Describe the bug**
This is likely specific to the exact email setup users have but in my case, the notification mails in my case are caught by the SpamAssassin rule `TO_NO_BRKTS_HTML_IMG` which is discussed [here](https://stackoverflow.com/questions/40082594/meaning-of-to-no-brkts-html-img-in-spamassassin). As a fix for this, it would be useful if ephios used the configured first and last name to properly address the users in the `To:` field of the email.
**To Reproduce**
Make ephios send a notification email.
**Expected behavior**
Emails should arrive in my inbox without being detected as SPAM. This could be done by modifying the `To:` field.
**Screenshots**
\-
**Environment**
An email SPAM filter using SpamAssassin
**Additional context**
\-
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/core/services/notifications/backends.py`
Content:
```
1 import logging
2 import smtplib
3 import traceback
4
5 from django.conf import settings
6 from django.core.mail import mail_admins
7 from django.utils.translation import gettext_lazy as _
8 from webpush import send_user_notification
9
10 from ephios.core.models.users import Notification
11 from ephios.core.services.mail.send import send_mail
12
13 logger = logging.getLogger(__name__)
14
15
16 def installed_notification_backends():
17 from ephios.core.signals import register_notification_backends
18
19 for _, backends in register_notification_backends.send_to_all_plugins(None):
20 yield from (b() for b in backends)
21
22
23 def enabled_notification_backends():
24 from ephios.core.signals import register_notification_backends
25
26 for _, backends in register_notification_backends.send(None):
27 yield from (b() for b in backends)
28
29
30 def send_all_notifications():
31 for backend in installed_notification_backends():
32 for notification in Notification.objects.filter(failed=False):
33 if backend.can_send(notification) and backend.user_prefers_sending(notification):
34 try:
35 backend.send(notification)
36 except Exception as e: # pylint: disable=broad-except
37 if settings.DEBUG:
38 raise e
39 notification.failed = True
40 notification.save()
41 try:
42 mail_admins(
43 "Notification sending failed",
44 f"Notification: {notification}\nException: {e}\n{traceback.format_exc()}",
45 )
46 except smtplib.SMTPConnectError:
47 pass # if the mail backend threw this, mail admin will probably throw this as well
48 logger.warning(
49 f"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}"
50 )
51 Notification.objects.filter(failed=False).delete()
52
53
54 class AbstractNotificationBackend:
55 @property
56 def slug(self):
57 return NotImplementedError
58
59 @property
60 def title(self):
61 return NotImplementedError
62
63 @classmethod
64 def can_send(cls, notification):
65 return notification.user is not None
66
67 @classmethod
68 def user_prefers_sending(cls, notification):
69 if notification.notification_type.unsubscribe_allowed and notification.user is not None:
70 if not notification.user.is_active:
71 return False
72 backends = notification.user.preferences["notifications__notifications"].get(
73 notification.slug
74 )
75 if backends is not None:
76 return cls.slug in backends
77 return True
78
79 @classmethod
80 def send(cls, notification: Notification):
81 raise NotImplementedError
82
83
84 class EmailNotificationBackend(AbstractNotificationBackend):
85 slug = "ephios_backend_email"
86 title = _("via email")
87
88 @classmethod
89 def can_send(cls, notification):
90 return notification.user is not None or "email" in notification.data
91
92 @classmethod
93 def _get_mailaddress(cls, notification):
94 return notification.user.email if notification.user else notification.data.get("email")
95
96 @classmethod
97 def send(cls, notification):
98 send_mail(
99 to=[cls._get_mailaddress(notification)],
100 subject=notification.subject,
101 plaintext=notification.as_plaintext(),
102 html=notification.as_html(),
103 is_autogenerated=True,
104 )
105
106
107 class WebPushNotificationBackend(AbstractNotificationBackend):
108 slug = "ephios_backend_webpush"
109 title = _("via push notification")
110
111 @classmethod
112 def send(cls, notification):
113 payload = {
114 "head": str(notification.subject),
115 "body": notification.body,
116 "icon": "/static/ephios/img/ephios-symbol-red.svg",
117 }
118 if actions := notification.get_actions():
119 payload["url"] = actions[0][1]
120 send_user_notification(user=notification.user, payload=payload, ttl=1000)
121
122
123 CORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ephios/core/services/notifications/backends.py b/ephios/core/services/notifications/backends.py
--- a/ephios/core/services/notifications/backends.py
+++ b/ephios/core/services/notifications/backends.py
@@ -91,7 +91,9 @@
@classmethod
def _get_mailaddress(cls, notification):
- return notification.user.email if notification.user else notification.data.get("email")
+ if notification.user:
+ return f"{notification.user.get_full_name()} <{notification.user.email}>"
+ return notification.data.get("email")
@classmethod
def send(cls, notification):
|
{"golden_diff": "diff --git a/ephios/core/services/notifications/backends.py b/ephios/core/services/notifications/backends.py\n--- a/ephios/core/services/notifications/backends.py\n+++ b/ephios/core/services/notifications/backends.py\n@@ -91,7 +91,9 @@\n \n @classmethod\n def _get_mailaddress(cls, notification):\n- return notification.user.email if notification.user else notification.data.get(\"email\")\n+ if notification.user:\n+ return f\"{notification.user.get_full_name()} <{notification.user.email}>\"\n+ return notification.data.get(\"email\")\n \n @classmethod\n def send(cls, notification):\n", "issue": "Notification Mails detected as SPAM\n**Describe the bug**\r\nThis is likely specific to the exact email setup users have but in my case, the notification mails in my case are caught by the SpamAssassin rule `TO_NO_BRKTS_HTML_IMG` which is discussed [here](https://stackoverflow.com/questions/40082594/meaning-of-to-no-brkts-html-img-in-spamassassin). As a fix for this, it would be useful if ephios used the configured first and last name to properly address the users in the `To:` field of the email. \r\n\r\n**To Reproduce**\r\nMake ephios send a notification email.\r\n\r\n**Expected behavior**\r\nEmails should arrive in my inbox without being detected as SPAM. This could be done by modifying the `To:` field. \r\n\r\n**Screenshots**\r\n\\-\r\n\r\n**Environment**\r\nAn email SPAM filter using SpamAssassin\r\n\r\n**Additional context**\r\n\\-\r\n\n", "before_files": [{"content": "import logging\nimport smtplib\nimport traceback\n\nfrom django.conf import settings\nfrom django.core.mail import mail_admins\nfrom django.utils.translation import gettext_lazy as _\nfrom webpush import send_user_notification\n\nfrom ephios.core.models.users import Notification\nfrom ephios.core.services.mail.send import send_mail\n\nlogger = logging.getLogger(__name__)\n\n\ndef installed_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send_to_all_plugins(None):\n yield from (b() for b in backends)\n\n\ndef enabled_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send(None):\n yield from (b() for b in backends)\n\n\ndef send_all_notifications():\n for backend in installed_notification_backends():\n for notification in Notification.objects.filter(failed=False):\n if backend.can_send(notification) and backend.user_prefers_sending(notification):\n try:\n backend.send(notification)\n except Exception as e: # pylint: disable=broad-except\n if settings.DEBUG:\n raise e\n notification.failed = True\n notification.save()\n try:\n mail_admins(\n \"Notification sending failed\",\n f\"Notification: {notification}\\nException: {e}\\n{traceback.format_exc()}\",\n )\n except smtplib.SMTPConnectError:\n pass # if the mail backend threw this, mail admin will probably throw this as well\n logger.warning(\n f\"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}\"\n )\n Notification.objects.filter(failed=False).delete()\n\n\nclass AbstractNotificationBackend:\n @property\n def slug(self):\n return NotImplementedError\n\n @property\n def title(self):\n return NotImplementedError\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None\n\n @classmethod\n def user_prefers_sending(cls, notification):\n if 
notification.notification_type.unsubscribe_allowed and notification.user is not None:\n if not notification.user.is_active:\n return False\n backends = notification.user.preferences[\"notifications__notifications\"].get(\n notification.slug\n )\n if backends is not None:\n return cls.slug in backends\n return True\n\n @classmethod\n def send(cls, notification: Notification):\n raise NotImplementedError\n\n\nclass EmailNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_email\"\n title = _(\"via email\")\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None or \"email\" in notification.data\n\n @classmethod\n def _get_mailaddress(cls, notification):\n return notification.user.email if notification.user else notification.data.get(\"email\")\n\n @classmethod\n def send(cls, notification):\n send_mail(\n to=[cls._get_mailaddress(notification)],\n subject=notification.subject,\n plaintext=notification.as_plaintext(),\n html=notification.as_html(),\n is_autogenerated=True,\n )\n\n\nclass WebPushNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_webpush\"\n title = _(\"via push notification\")\n\n @classmethod\n def send(cls, notification):\n payload = {\n \"head\": str(notification.subject),\n \"body\": notification.body,\n \"icon\": \"/static/ephios/img/ephios-symbol-red.svg\",\n }\n if actions := notification.get_actions():\n payload[\"url\"] = actions[0][1]\n send_user_notification(user=notification.user, payload=payload, ttl=1000)\n\n\nCORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]\n", "path": "ephios/core/services/notifications/backends.py"}], "after_files": [{"content": "import logging\nimport smtplib\nimport traceback\n\nfrom django.conf import settings\nfrom django.core.mail import mail_admins\nfrom django.utils.translation import gettext_lazy as _\nfrom webpush import send_user_notification\n\nfrom ephios.core.models.users import Notification\nfrom ephios.core.services.mail.send import send_mail\n\nlogger = logging.getLogger(__name__)\n\n\ndef installed_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send_to_all_plugins(None):\n yield from (b() for b in backends)\n\n\ndef enabled_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send(None):\n yield from (b() for b in backends)\n\n\ndef send_all_notifications():\n for backend in installed_notification_backends():\n for notification in Notification.objects.filter(failed=False):\n if backend.can_send(notification) and backend.user_prefers_sending(notification):\n try:\n backend.send(notification)\n except Exception as e: # pylint: disable=broad-except\n if settings.DEBUG:\n raise e\n notification.failed = True\n notification.save()\n try:\n mail_admins(\n \"Notification sending failed\",\n f\"Notification: {notification}\\nException: {e}\\n{traceback.format_exc()}\",\n )\n except smtplib.SMTPConnectError:\n pass # if the mail backend threw this, mail admin will probably throw this as well\n logger.warning(\n f\"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}\"\n )\n Notification.objects.filter(failed=False).delete()\n\n\nclass AbstractNotificationBackend:\n @property\n def slug(self):\n return NotImplementedError\n\n @property\n def title(self):\n return 
NotImplementedError\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None\n\n @classmethod\n def user_prefers_sending(cls, notification):\n if notification.notification_type.unsubscribe_allowed and notification.user is not None:\n if not notification.user.is_active:\n return False\n backends = notification.user.preferences[\"notifications__notifications\"].get(\n notification.slug\n )\n if backends is not None:\n return cls.slug in backends\n return True\n\n @classmethod\n def send(cls, notification: Notification):\n raise NotImplementedError\n\n\nclass EmailNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_email\"\n title = _(\"via email\")\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None or \"email\" in notification.data\n\n @classmethod\n def _get_mailaddress(cls, notification):\n if notification.user:\n return f\"{notification.user.get_full_name()} <{notification.user.email}>\"\n return notification.data.get(\"email\")\n\n @classmethod\n def send(cls, notification):\n send_mail(\n to=[cls._get_mailaddress(notification)],\n subject=notification.subject,\n plaintext=notification.as_plaintext(),\n html=notification.as_html(),\n is_autogenerated=True,\n )\n\n\nclass WebPushNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_webpush\"\n title = _(\"via push notification\")\n\n @classmethod\n def send(cls, notification):\n payload = {\n \"head\": str(notification.subject),\n \"body\": notification.body,\n \"icon\": \"/static/ephios/img/ephios-symbol-red.svg\",\n }\n if actions := notification.get_actions():\n payload[\"url\"] = actions[0][1]\n send_user_notification(user=notification.user, payload=payload, ttl=1000)\n\n\nCORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]\n", "path": "ephios/core/services/notifications/backends.py"}]}
| 1,520 | 140 |
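One aside on the ephios patch above: it formats the recipient as `Full Name <address>` with an f-string. The standard library's `email.utils.formataddr` builds the same kind of header value while also quoting names that contain specials and encoding non-ASCII; a variant of the patched helper using it might look like the sketch below (this is not the code ephios merged, just an alternative shape):

```python
from email.utils import formataddr

def get_mailaddress(notification):
    """Build the To: value with a display name, like the patched classmethod above."""
    if notification.user:
        # formataddr quotes names containing specials and encodes non-ASCII,
        # which the plain f-string in the golden diff does not do.
        return formataddr((notification.user.get_full_name(), notification.user.email))
    return notification.data.get("email")
```

Either form addresses the SpamAssassin `TO_NO_BRKTS_HTML_IMG` complaint, since the `To:` header now carries a display name rather than a bare address.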
gh_patches_debug_9171 | rasdani/github-patches | git_diff | nilearn__nilearn-4267 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding tests for rapid inspection of visual elements
**What would you like changed/added and why?**
Quoting @jeromedockes here, from https://github.com/nilearn/nilearn/pull/2191#issuecomment-546278888
Since we are adding reports etc. we should add more meaningful inspection of generated plots and reports in the tests
**What would be the benefit? Does the change make something easier to use?**
I think CircleCI does all this but it takes way too long. Maybe a faster simpler test to quickly generate plots and reports to visually examine will be useful?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/reporting/__init__.py`
Content:
```
1 """Reporting code for nilearn.
2
3 This module implements plotting functions useful to report analysis results.
4
5 Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
6 """
7
8 from nilearn.reporting.get_clusters_table import get_clusters_table
9 from nilearn.reporting.glm_reporter import make_glm_report
10 from nilearn.reporting.html_report import HTMLReport
11
12 __all__ = [
13 "HTMLReport",
14 "get_clusters_table",
15 "make_glm_report",
16 ]
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/reporting/__init__.py b/nilearn/reporting/__init__.py
--- a/nilearn/reporting/__init__.py
+++ b/nilearn/reporting/__init__.py
@@ -1,10 +1,10 @@
"""Reporting code for nilearn.
This module implements plotting functions useful to report analysis results.
-
-Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
"""
+# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
+
from nilearn.reporting.get_clusters_table import get_clusters_table
from nilearn.reporting.glm_reporter import make_glm_report
from nilearn.reporting.html_report import HTMLReport
|
{"golden_diff": "diff --git a/nilearn/reporting/__init__.py b/nilearn/reporting/__init__.py\n--- a/nilearn/reporting/__init__.py\n+++ b/nilearn/reporting/__init__.py\n@@ -1,10 +1,10 @@\n \"\"\"Reporting code for nilearn.\n \n This module implements plotting functions useful to report analysis results.\n-\n-Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n \"\"\"\n \n+# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n+\n from nilearn.reporting.get_clusters_table import get_clusters_table\n from nilearn.reporting.glm_reporter import make_glm_report\n from nilearn.reporting.html_report import HTMLReport\n", "issue": "Adding tests for rapid inspection of visual elements\n**What would you like changed/added and why?**\r\n\r\nQuoting @jeromedockes here, from https://github.com/nilearn/nilearn/pull/2191#issuecomment-546278888\r\nSince we are adding reports etc. we should add more meaningful inspection of generated plots and reports in the tests\r\n\r\n**What would be the benefit? Does the change make something easier to use?**\r\n\r\n I think CircleCI does all this but it takes way too long. Maybe a faster simpler test to quickly generate plots and reports to visually examine will be useful?\n", "before_files": [{"content": "\"\"\"Reporting code for nilearn.\n\nThis module implements plotting functions useful to report analysis results.\n\nAuthor: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\"\"\"\n\nfrom nilearn.reporting.get_clusters_table import get_clusters_table\nfrom nilearn.reporting.glm_reporter import make_glm_report\nfrom nilearn.reporting.html_report import HTMLReport\n\n__all__ = [\n \"HTMLReport\",\n \"get_clusters_table\",\n \"make_glm_report\",\n]\n", "path": "nilearn/reporting/__init__.py"}], "after_files": [{"content": "\"\"\"Reporting code for nilearn.\n\nThis module implements plotting functions useful to report analysis results.\n\"\"\"\n\n# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\nfrom nilearn.reporting.get_clusters_table import get_clusters_table\nfrom nilearn.reporting.glm_reporter import make_glm_report\nfrom nilearn.reporting.html_report import HTMLReport\n\n__all__ = [\n \"HTMLReport\",\n \"get_clusters_table\",\n \"make_glm_report\",\n]\n", "path": "nilearn/reporting/__init__.py"}]}
| 531 | 165 |
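The fast visual-inspection tests the nilearn issue above asks for can start very small. The sketch below is one possible shape for such a smoke test — it is not taken from the nilearn test suite — and it only checks that a plotting call produces a matplotlib figure from a tiny synthetic volume, which keeps it far cheaper than rendering the full example gallery on CircleCI.

```python
import matplotlib
matplotlib.use("Agg")  # no display needed in CI

import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
from nilearn.plotting import plot_img


def test_plot_img_smoke():
    # Tiny synthetic volume keeps this well under a second.
    data = np.random.RandomState(0).rand(8, 8, 8)
    img = nib.Nifti1Image(data, affine=np.eye(4))
    display = plot_img(img)
    assert plt.get_fignums(), "plotting produced no figure to inspect"
    display.close()
```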
gh_patches_debug_12341 | rasdani/github-patches | git_diff | aio-libs__aiohttp-3065 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change appveyor account
https://github.com/aio-libs/aiohttp/pull/2990#discussion_r187559585:
> This requires owner privileges AFAIR, so I can't. It's a bit tricky, but the bottom line is that you create a team there mapped to the team in GitHub and give it some access there. Also you need to somehow create an org link there.
> Here you go: https://www.appveyor.com/docs/team-setup/#setting-up-appveyor-account-for-github-organization
Create a separate account called aio-libs and then add your personal account as its administrator. You can also create team mapped to some team in GitHub, allowing its members to cancel/rerun builds.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import pathlib
3 import re
4 import sys
5 from distutils.command.build_ext import build_ext
6 from distutils.errors import (CCompilerError, DistutilsExecError,
7 DistutilsPlatformError)
8
9 from setuptools import Extension, setup
10
11
12 if sys.version_info < (3, 5, 3):
13 raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
14
15
16 try:
17 from Cython.Build import cythonize
18 USE_CYTHON = True
19 except ImportError:
20 USE_CYTHON = False
21
22 ext = '.pyx' if USE_CYTHON else '.c'
23
24
25 extensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),
26 Extension('aiohttp._http_parser',
27 ['aiohttp/_http_parser' + ext,
28 'vendor/http-parser/http_parser.c'],
29 define_macros=[('HTTP_PARSER_STRICT', 0)],
30 ),
31 Extension('aiohttp._frozenlist',
32 ['aiohttp/_frozenlist' + ext]),
33 Extension('aiohttp._helpers',
34 ['aiohttp/_helpers' + ext]),
35 Extension('aiohttp._http_writer',
36 ['aiohttp/_http_writer' + ext])]
37
38
39 if USE_CYTHON:
40 extensions = cythonize(extensions)
41
42
43 class BuildFailed(Exception):
44 pass
45
46
47 class ve_build_ext(build_ext):
48 # This class allows C extension building to fail.
49
50 def run(self):
51 try:
52 build_ext.run(self)
53 except (DistutilsPlatformError, FileNotFoundError):
54 raise BuildFailed()
55
56 def build_extension(self, ext):
57 try:
58 build_ext.build_extension(self, ext)
59 except (CCompilerError, DistutilsExecError,
60 DistutilsPlatformError, ValueError):
61 raise BuildFailed()
62
63
64 here = pathlib.Path(__file__).parent
65
66 txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
67 try:
68 version = re.findall(r"^__version__ = '([^']+)'\r?$",
69 txt, re.M)[0]
70 except IndexError:
71 raise RuntimeError('Unable to determine version.')
72
73
74 install_requires = ['attrs>=17.3.0', 'chardet>=2.0,<4.0',
75 'multidict>=4.0,<5.0',
76 'async_timeout>=3.0,<4.0',
77 'yarl>=1.0,<2.0']
78
79 if sys.version_info < (3, 7):
80 install_requires.append('idna-ssl>=1.0')
81
82
83 def read(f):
84 return (here / f).read_text('utf-8').strip()
85
86
87 NEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)
88 pytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []
89
90 tests_require = ['pytest', 'gunicorn',
91 'pytest-timeout', 'async-generator']
92
93
94 args = dict(
95 name='aiohttp',
96 version=version,
97 description='Async http client/server framework (asyncio)',
98 long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
99 classifiers=[
100 'License :: OSI Approved :: Apache Software License',
101 'Intended Audience :: Developers',
102 'Programming Language :: Python',
103 'Programming Language :: Python :: 3',
104 'Programming Language :: Python :: 3.5',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Development Status :: 5 - Production/Stable',
108 'Operating System :: POSIX',
109 'Operating System :: MacOS :: MacOS X',
110 'Operating System :: Microsoft :: Windows',
111 'Topic :: Internet :: WWW/HTTP',
112 'Framework :: AsyncIO',
113 ],
114 author='Nikolay Kim',
115 author_email='[email protected]',
116 maintainer=', '.join(('Nikolay Kim <[email protected]>',
117 'Andrew Svetlov <[email protected]>')),
118 maintainer_email='[email protected]',
119 url='https://github.com/aio-libs/aiohttp',
120 project_urls={
121 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
122 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug
123 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
124 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
125 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
126 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',
127 'Docs: RTD': 'https://docs.aiohttp.org',
128 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',
129 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',
130 },
131 license='Apache 2',
132 packages=['aiohttp'],
133 python_requires='>=3.5.3',
134 install_requires=install_requires,
135 tests_require=tests_require,
136 setup_requires=pytest_runner,
137 include_package_data=True,
138 ext_modules=extensions,
139 cmdclass=dict(build_ext=ve_build_ext),
140 )
141
142 try:
143 setup(**args)
144 except BuildFailed:
145 print("************************************************************")
146 print("Cannot compile C accelerator module, use pure python version")
147 print("************************************************************")
148 del args['ext_modules']
149 del args['cmdclass']
150 setup(**args)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -119,7 +119,7 @@
url='https://github.com/aio-libs/aiohttp',
project_urls={
'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
- 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug
+ 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',
'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
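This patch only touches packaging metadata, so one way to sanity-check it is to read the installed distribution's `Project-URL` entries. The snippet below is an illustrative check, not part of the aiohttp repository; only the `'CI: AppVeyor'` label comes from the `setup.py` shown above, everything else is assumed.

```python
# Illustrative check (not aiohttp code): confirm the AppVeyor link in the
# installed metadata points at the aio-libs organisation account.
from importlib import metadata  # stdlib on Python 3.8+

def appveyor_url(dist_name="aiohttp"):
    # Each Project-URL entry is stored as a "Label, URL" string.
    for entry in metadata.metadata(dist_name).get_all("Project-URL") or []:
        label, _, url = entry.partition(",")
        if label.strip() == "CI: AppVeyor":
            return url.strip()
    return None

print(appveyor_url())  # expected after the patch: .../project/aio-libs/aiohttp
```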
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -119,7 +119,7 @@\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n- 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug\n+ 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n", "issue": "Change appveyor account\nhttps://github.com/aio-libs/aiohttp/pull/2990#discussion_r187559585:\r\n\r\n> This requires owner privileges AFAIR, so I can't. It's a bit tricky, but the bottom line is that you create a team there mapped to the team in GitHub and give it some access there. Also you need to somehow create an org link there.\r\n\r\n> Here you go: https://www.appveyor.com/docs/team-setup/#setting-up-appveyor-account-for-github-organization\r\nCreate a separate account called aio-libs and then add your personal account as its administrator. You can also create team mapped to some team in GitHub, allowing its members to cancel/rerun builds.\n", "before_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\nhere = pathlib.Path(__file__).parent\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\n\ninstall_requires = ['attrs>=17.3.0', 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0']\n\nif sys.version_info < (3, 7):\n install_requires.append('idna-ssl>=1.0')\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = ['pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator']\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server 
framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n 
DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\nhere = pathlib.Path(__file__).parent\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\n\ninstall_requires = ['attrs>=17.3.0', 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0']\n\nif sys.version_info < (3, 7):\n install_requires.append('idna-ssl>=1.0')\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = ['pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator']\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}]}
| 1,999 | 204 |
gh_patches_debug_9225
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpose-259
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pylint: W0707
```bash
mmpose/utils/hooks.py:37:20: W0707: Consider explicitly re-raising using the 'from' keyword (raise-missing-from)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/utils/hooks.py`
Content:
```
1 import functools
2
3
4 class OutputHook:
5
6 def __init__(self, module, outputs=None, as_tensor=False):
7 self.outputs = outputs
8 self.as_tensor = as_tensor
9 self.layer_outputs = {}
10 self.register(module)
11
12 def register(self, module):
13
14 def hook_wrapper(name):
15
16 def hook(model, input, output):
17 if self.as_tensor:
18 self.layer_outputs[name] = output
19 else:
20 if isinstance(output, list):
21 self.layer_outputs[name] = [
22 out.detach().cpu().numpy() for out in output
23 ]
24 else:
25 self.layer_outputs[name] = output.detach().cpu().numpy(
26 )
27
28 return hook
29
30 self.handles = []
31 if isinstance(self.outputs, (list, tuple)):
32 for name in self.outputs:
33 try:
34 layer = rgetattr(module, name)
35 h = layer.register_forward_hook(hook_wrapper(name))
36 except AttributeError:
37 raise AttributeError(f'Module {name} not found')
38 self.handles.append(h)
39
40 def remove(self):
41 for h in self.handles:
42 h.remove()
43
44 def __enter__(self):
45 return self
46
47 def __exit__(self, exc_type, exc_val, exc_tb):
48 self.remove()
49
50
51 # using wonder's beautiful simplification:
52 # https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects
53 def rgetattr(obj, attr, *args):
54
55 def _getattr(obj, attr):
56 return getattr(obj, attr, *args)
57
58 return functools.reduce(_getattr, [obj] + attr.split('.'))
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py
--- a/mmpose/utils/hooks.py
+++ b/mmpose/utils/hooks.py
@@ -33,8 +33,9 @@
try:
layer = rgetattr(module, name)
h = layer.register_forward_hook(hook_wrapper(name))
- except AttributeError:
- raise AttributeError(f'Module {name} not found')
+ except ModuleNotFoundError as module_not_found:
+ raise ModuleNotFoundError(
+ f'Module {name} not found') from module_not_found
self.handles.append(h)
def remove(self):
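For intuition, here is a minimal, self-contained illustration of what Pylint's W0707 asks for: re-raising with `from` keeps the original exception on the chain as `__cause__`. The function and names below are made up for the example; only the `raise ... from ...` pattern mirrors the patch.

```python
# Standalone sketch of explicit exception chaining (raise ... from ...).
def get_layer(layers, name):
    try:
        return layers[name]
    except KeyError as not_found:
        # W0707 fix: chain the original error instead of discarding it.
        raise ModuleNotFoundError(f"Module {name} not found") from not_found

try:
    get_layer({}, "backbone")
except ModuleNotFoundError as err:
    assert isinstance(err.__cause__, KeyError)  # original error kept on the chain
```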
|
{"golden_diff": "diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py\n--- a/mmpose/utils/hooks.py\n+++ b/mmpose/utils/hooks.py\n@@ -33,8 +33,9 @@\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n- except AttributeError:\n- raise AttributeError(f'Module {name} not found')\n+ except ModuleNotFoundError as module_not_found:\n+ raise ModuleNotFoundError(\n+ f'Module {name} not found') from module_not_found\n self.handles.append(h)\n \n def remove(self):\n", "issue": "Pylint: W0707\n```bash\r\nmmpose/utils/hooks.py:37:20: W0707: Consider explicitly re-raising using the 'from' keyword (raise-missing-from)\r\n```\n", "before_files": [{"content": "import functools\n\n\nclass OutputHook:\n\n def __init__(self, module, outputs=None, as_tensor=False):\n self.outputs = outputs\n self.as_tensor = as_tensor\n self.layer_outputs = {}\n self.register(module)\n\n def register(self, module):\n\n def hook_wrapper(name):\n\n def hook(model, input, output):\n if self.as_tensor:\n self.layer_outputs[name] = output\n else:\n if isinstance(output, list):\n self.layer_outputs[name] = [\n out.detach().cpu().numpy() for out in output\n ]\n else:\n self.layer_outputs[name] = output.detach().cpu().numpy(\n )\n\n return hook\n\n self.handles = []\n if isinstance(self.outputs, (list, tuple)):\n for name in self.outputs:\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n except AttributeError:\n raise AttributeError(f'Module {name} not found')\n self.handles.append(h)\n\n def remove(self):\n for h in self.handles:\n h.remove()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.remove()\n\n\n# using wonder's beautiful simplification:\n# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects\ndef rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))\n", "path": "mmpose/utils/hooks.py"}], "after_files": [{"content": "import functools\n\n\nclass OutputHook:\n\n def __init__(self, module, outputs=None, as_tensor=False):\n self.outputs = outputs\n self.as_tensor = as_tensor\n self.layer_outputs = {}\n self.register(module)\n\n def register(self, module):\n\n def hook_wrapper(name):\n\n def hook(model, input, output):\n if self.as_tensor:\n self.layer_outputs[name] = output\n else:\n if isinstance(output, list):\n self.layer_outputs[name] = [\n out.detach().cpu().numpy() for out in output\n ]\n else:\n self.layer_outputs[name] = output.detach().cpu().numpy(\n )\n\n return hook\n\n self.handles = []\n if isinstance(self.outputs, (list, tuple)):\n for name in self.outputs:\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n except ModuleNotFoundError as module_not_found:\n raise ModuleNotFoundError(\n f'Module {name} not found') from module_not_found\n self.handles.append(h)\n\n def remove(self):\n for h in self.handles:\n h.remove()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.remove()\n\n\n# using wonder's beautiful simplification:\n# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects\ndef rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))\n", "path": "mmpose/utils/hooks.py"}]}
| 772 | 135 |
gh_patches_debug_23405
|
rasdani/github-patches
|
git_diff
|
Theano__Theano-5045
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Theano deprecation warning
I have started running my code with warnings as errors. Theano trips the deprecation warning:
```
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/configparser.py", line 1209, in __init__
DeprecationWarning, stacklevel=2
DeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.
```
Please use:
```
theano_cfg = (ConfigParser.ConfigParser if six.PY3 else ConfigParser.SafeConfigParser)(…
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theano/misc/windows.py`
Content:
```
1 from __future__ import absolute_import, print_function, division
2 import os
3 import subprocess
4
5
6 def subprocess_Popen(command, **params):
7 """
8 Utility function to work around windows behavior that open windows.
9
10 :see: call_subprocess_Popen and output_subprocess_Popen
11 """
12 startupinfo = None
13 if os.name == 'nt':
14 startupinfo = subprocess.STARTUPINFO()
15 try:
16 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
17 except AttributeError:
18 startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
19
20 # Anaconda for Windows does not always provide .exe files
21 # in the PATH, they also have .bat files that call the corresponding
22 # executable. For instance, "g++.bat" is in the PATH, not "g++.exe"
23 # Unless "shell=True", "g++.bat" is not executed when trying to
24 # execute "g++" without extensions.
25 # (Executing "g++.bat" explicitly would also work.)
26 params['shell'] = True
27 # "If shell is True, it is recommended to pass args as a string rather than as a sequence." (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)
28 # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently
29 # in "The filename, directory name, or volume label syntax is incorrect" error message.
30 # Passing the command as a single string solves this problem.
31 if isinstance(command, list):
32 command = ' '.join(command)
33
34 # Using the dummy file descriptors below is a workaround for a
35 # crash experienced in an unusual Python 2.4.4 Windows environment
36 # with the default None values.
37 stdin = None
38 if "stdin" not in params:
39 stdin = open(os.devnull)
40 params['stdin'] = stdin.fileno()
41
42 try:
43 proc = subprocess.Popen(command, startupinfo=startupinfo, **params)
44 finally:
45 if stdin is not None:
46 del stdin
47 return proc
48
49
50 def call_subprocess_Popen(command, **params):
51 """
52 Calls subprocess_Popen and discards the output, returning only the
53 exit code.
54 """
55 if 'stdout' in params or 'stderr' in params:
56 raise TypeError("don't use stderr or stdout with call_subprocess_Popen")
57 with open(os.devnull, 'wb') as null:
58 # stdin to devnull is a workaround for a crash in a weird Windows
59 # environment where sys.stdin was None
60 params.setdefault('stdin', null)
61 params['stdout'] = null
62 params['stderr'] = null
63 p = subprocess_Popen(command, **params)
64 returncode = p.wait()
65 return returncode
66
67
68 def output_subprocess_Popen(command, **params):
69 """
70 Calls subprocess_Popen, returning the output, error and exit code
71 in a tuple.
72 """
73 if 'stdout' in params or 'stderr' in params:
74 raise TypeError("don't use stderr or stdout with output_subprocess_Popen")
75 # stdin to devnull is a workaround for a crash in a weird Windows
76 # environement where sys.stdin was None
77 if not hasattr(params, 'stdin'):
78 null = open(os.devnull, 'wb')
79 params['stdin'] = null
80 params['stdout'] = subprocess.PIPE
81 params['stderr'] = subprocess.PIPE
82 p = subprocess_Popen(command, **params)
83 # we need to use communicate to make sure we don't deadlock around
84 # the stdour/stderr pipe.
85 out = p.communicate()
86 return out + (p.returncode,)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/theano/misc/windows.py b/theano/misc/windows.py
--- a/theano/misc/windows.py
+++ b/theano/misc/windows.py
@@ -43,7 +43,7 @@
proc = subprocess.Popen(command, startupinfo=startupinfo, **params)
finally:
if stdin is not None:
- del stdin
+ stdin.close()
return proc
@@ -72,15 +72,10 @@
"""
if 'stdout' in params or 'stderr' in params:
raise TypeError("don't use stderr or stdout with output_subprocess_Popen")
- # stdin to devnull is a workaround for a crash in a weird Windows
- # environement where sys.stdin was None
- if not hasattr(params, 'stdin'):
- null = open(os.devnull, 'wb')
- params['stdin'] = null
params['stdout'] = subprocess.PIPE
params['stderr'] = subprocess.PIPE
p = subprocess_Popen(command, **params)
# we need to use communicate to make sure we don't deadlock around
- # the stdour/stderr pipe.
+ # the stdout/stderr pipe.
out = p.communicate()
return out + (p.returncode,)
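As a rough standalone sketch of the behaviour this patch keeps (capture output via `communicate()` so the stdout/stderr pipes cannot deadlock) and the resource handling it fixes (close the dummy `stdin` handle rather than merely deleting the reference), with none of the Theano-specific Windows logic:

```python
# Generic sketch, not Theano code: open a dummy stdin, always close it, and
# use communicate() to drain the output pipes safely.
import os
import subprocess
import sys

def run_and_capture(command):
    stdin = open(os.devnull)
    try:
        proc = subprocess.Popen(
            command, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
    finally:
        stdin.close()  # the patch closes the handle instead of `del stdin`
    out, err = proc.communicate()
    return out, err, proc.returncode

print(run_and_capture([sys.executable, "-c", "print('ok')"]))
```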
|
{"golden_diff": "diff --git a/theano/misc/windows.py b/theano/misc/windows.py\n--- a/theano/misc/windows.py\n+++ b/theano/misc/windows.py\n@@ -43,7 +43,7 @@\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n- del stdin\n+ stdin.close()\n return proc\n \n \n@@ -72,15 +72,10 @@\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n- # stdin to devnull is a workaround for a crash in a weird Windows\n- # environement where sys.stdin was None\n- if not hasattr(params, 'stdin'):\n- null = open(os.devnull, 'wb')\n- params['stdin'] = null\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n- # the stdour/stderr pipe.\n+ # the stdout/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "issue": "Theano deprecation warning\nI have started running my code with warnings as errors. Theano trips the deprecation warning:\n\n```\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/configparser.py\", line 1209, in __init__\n DeprecationWarning, stacklevel=2\nDeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.\n```\n\nPlease use:\n\n```\ntheano_cfg = (ConfigParser.ConfigParser if six.PY3 else ConfigParser.SafeConfigParser)(\u2026\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport os\nimport subprocess\n\n\ndef subprocess_Popen(command, **params):\n \"\"\"\n Utility function to work around windows behavior that open windows.\n\n :see: call_subprocess_Popen and output_subprocess_Popen\n \"\"\"\n startupinfo = None\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n try:\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n except AttributeError:\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n\n # Anaconda for Windows does not always provide .exe files\n # in the PATH, they also have .bat files that call the corresponding\n # executable. 
For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\n # execute \"g++\" without extensions.\n # (Executing \"g++.bat\" explicitly would also work.)\n params['shell'] = True\n # \"If shell is True, it is recommended to pass args as a string rather than as a sequence.\" (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)\n # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently\n # in \"The filename, directory name, or volume label syntax is incorrect\" error message.\n # Passing the command as a single string solves this problem.\n if isinstance(command, list):\n command = ' '.join(command)\n\n # Using the dummy file descriptors below is a workaround for a\n # crash experienced in an unusual Python 2.4.4 Windows environment\n # with the default None values.\n stdin = None\n if \"stdin\" not in params:\n stdin = open(os.devnull)\n params['stdin'] = stdin.fileno()\n\n try:\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n del stdin\n return proc\n\n\ndef call_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen and discards the output, returning only the\n exit code.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\n with open(os.devnull, 'wb') as null:\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environment where sys.stdin was None\n params.setdefault('stdin', null)\n params['stdout'] = null\n params['stderr'] = null\n p = subprocess_Popen(command, **params)\n returncode = p.wait()\n return returncode\n\n\ndef output_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen, returning the output, error and exit code\n in a tuple.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environement where sys.stdin was None\n if not hasattr(params, 'stdin'):\n null = open(os.devnull, 'wb')\n params['stdin'] = null\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n # the stdour/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "path": "theano/misc/windows.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport os\nimport subprocess\n\n\ndef subprocess_Popen(command, **params):\n \"\"\"\n Utility function to work around windows behavior that open windows.\n\n :see: call_subprocess_Popen and output_subprocess_Popen\n \"\"\"\n startupinfo = None\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n try:\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n except AttributeError:\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n\n # Anaconda for Windows does not always provide .exe files\n # in the PATH, they also have .bat files that call the corresponding\n # executable. 
For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\n # execute \"g++\" without extensions.\n # (Executing \"g++.bat\" explicitly would also work.)\n params['shell'] = True\n # \"If shell is True, it is recommended to pass args as a string rather than as a sequence.\" (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)\n # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently\n # in \"The filename, directory name, or volume label syntax is incorrect\" error message.\n # Passing the command as a single string solves this problem.\n if isinstance(command, list):\n command = ' '.join(command)\n\n # Using the dummy file descriptors below is a workaround for a\n # crash experienced in an unusual Python 2.4.4 Windows environment\n # with the default None values.\n stdin = None\n if \"stdin\" not in params:\n stdin = open(os.devnull)\n params['stdin'] = stdin.fileno()\n\n try:\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n stdin.close()\n return proc\n\n\ndef call_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen and discards the output, returning only the\n exit code.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\n with open(os.devnull, 'wb') as null:\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environment where sys.stdin was None\n params.setdefault('stdin', null)\n params['stdout'] = null\n params['stderr'] = null\n p = subprocess_Popen(command, **params)\n returncode = p.wait()\n return returncode\n\n\ndef output_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen, returning the output, error and exit code\n in a tuple.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n # the stdout/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "path": "theano/misc/windows.py"}]}
| 1,374 | 272 |
gh_patches_debug_35090
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-1191
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency of type checking between SoftmaxCrossEntropy and Accuracy
We often evaluate training procedure with both `softmax_cross_entropy` and `accuracy`, as default `chainer.links.Classifier` does. But there are cases `softmax_cross_entorpy(x, t)` is accepted but `accuracy(x, t)` raises error because of its type checking (and vise versa). Replicated softmax is one of a such examples (e.g. `x.shape = (B, C, N)` and `t.shape = (B, N)`). We need to reshape them manually in such a case.
It is convenient for users that shape checking of the two be more consistent.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/evaluation/accuracy.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class Accuracy(function.Function):
9
10 def __init__(self, ignore_label=None):
11 self.ignore_label = ignore_label
12
13 def check_type_forward(self, in_types):
14 type_check.expect(in_types.size() == 2)
15 x_type, t_type = in_types
16
17 type_check.expect(
18 x_type.dtype.kind == 'f',
19 x_type.ndim >= 2,
20 t_type.dtype == numpy.int32,
21 t_type.ndim == 1,
22 t_type.shape[0] == x_type.shape[0],
23 )
24 for i in range(2, x_type.ndim.eval()):
25 type_check.expect(x_type.shape[i] == 1)
26
27 def forward(self, inputs):
28 xp = cuda.get_array_module(*inputs)
29 y, t = inputs
30 y = y.reshape(len(y), -1) # flatten
31
32 if self.ignore_label is not None:
33 mask = (t == self.ignore_label)
34 ignore_cnt = mask.sum()
35
36 # will always be true when the true label is ignore_label
37 # TODO(henry0312)
38 # If cupy.where returns indexes, we could make the code better.
39 # Also, we would need Advanced Indexing.
40 pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))
41 count = (pred == t).sum() - ignore_cnt
42 total = len(t) - ignore_cnt
43
44 if total == 0:
45 return xp.asarray(0.0, dtype=y.dtype),
46 else:
47 return xp.asarray(float(count) / total, dtype=y.dtype),
48 else:
49 pred = y.argmax(axis=1)
50 return xp.asarray((pred == t).mean(dtype=y.dtype)),
51
52
53 def accuracy(y, t, ignore_label=None):
54 """Computes muticlass classification accuracy of the minibatch.
55
56 Args:
57 y (Variable): Variable holding a matrix whose (i, j)-th element
58 indicates the score of the class j at the i-th example.
59 t (Variable): Variable holding an int32 vector of ground truth labels.
60 ignore_label (int or None): Skip calculating accuracy
61 if the ture label is ``ignore_label``.
62
63 Returns:
64 Variable: A variable holding a scalar array of the accuracy.
65
66 .. note:: This function is non-differentiable.
67
68 """
69 return Accuracy(ignore_label=ignore_label)(y, t)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/evaluation/accuracy.py b/chainer/functions/evaluation/accuracy.py
--- a/chainer/functions/evaluation/accuracy.py
+++ b/chainer/functions/evaluation/accuracy.py
@@ -1,4 +1,5 @@
import numpy
+import six
from chainer import cuda
from chainer import function
@@ -16,18 +17,21 @@
type_check.expect(
x_type.dtype.kind == 'f',
- x_type.ndim >= 2,
- t_type.dtype == numpy.int32,
- t_type.ndim == 1,
- t_type.shape[0] == x_type.shape[0],
+ t_type.dtype == numpy.int32
)
- for i in range(2, x_type.ndim.eval()):
+
+ t_ndim = t_type.ndim.eval()
+ type_check.expect(
+ x_type.ndim >= t_type.ndim,
+ x_type.shape[0] == t_type.shape[0],
+ x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
+ )
+ for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):
type_check.expect(x_type.shape[i] == 1)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
y, t = inputs
- y = y.reshape(len(y), -1) # flatten
if self.ignore_label is not None:
mask = (t == self.ignore_label)
@@ -37,16 +41,17 @@
# TODO(henry0312)
# If cupy.where returns indexes, we could make the code better.
# Also, we would need Advanced Indexing.
- pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))
+ pred = xp.where(mask, self.ignore_label,
+ y.argmax(axis=1).reshape(t.shape))
count = (pred == t).sum() - ignore_cnt
- total = len(t) - ignore_cnt
+ total = t.size - ignore_cnt
if total == 0:
return xp.asarray(0.0, dtype=y.dtype),
else:
return xp.asarray(float(count) / total, dtype=y.dtype),
else:
- pred = y.argmax(axis=1)
+ pred = y.argmax(axis=1).reshape(t.shape)
return xp.asarray((pred == t).mean(dtype=y.dtype)),
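To see why the reshape lets `accuracy` accept the same shapes as `softmax_cross_entropy`, here is a NumPy-only re-implementation of the patched logic for the replicated-softmax case from the issue (`x.shape = (B, C, N)`, `t.shape = (B, N)`). It is a sketch for intuition, not the Chainer code path.

```python
# NumPy sketch of the patched accuracy: argmax over the class axis, then
# reshape the predictions to the label shape before comparing.
import numpy as np

def accuracy(y, t, ignore_label=None):
    pred = y.argmax(axis=1).reshape(t.shape)
    if ignore_label is None:
        return float((pred == t).mean())
    keep = t != ignore_label
    total = int(keep.sum())
    return 0.0 if total == 0 else float((pred[keep] == t[keep]).sum() / total)

y = np.random.rand(4, 3, 5)               # scores with shape (B, C, N)
t = np.random.randint(0, 3, size=(4, 5))  # labels with shape (B, N)
print(accuracy(y, t))
```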
|
{"golden_diff": "diff --git a/chainer/functions/evaluation/accuracy.py b/chainer/functions/evaluation/accuracy.py\n--- a/chainer/functions/evaluation/accuracy.py\n+++ b/chainer/functions/evaluation/accuracy.py\n@@ -1,4 +1,5 @@\n import numpy\n+import six\n \n from chainer import cuda\n from chainer import function\n@@ -16,18 +17,21 @@\n \n type_check.expect(\n x_type.dtype.kind == 'f',\n- x_type.ndim >= 2,\n- t_type.dtype == numpy.int32,\n- t_type.ndim == 1,\n- t_type.shape[0] == x_type.shape[0],\n+ t_type.dtype == numpy.int32\n )\n- for i in range(2, x_type.ndim.eval()):\n+\n+ t_ndim = t_type.ndim.eval()\n+ type_check.expect(\n+ x_type.ndim >= t_type.ndim,\n+ x_type.shape[0] == t_type.shape[0],\n+ x_type.shape[2: t_ndim + 1] == t_type.shape[1:]\n+ )\n+ for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n \n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n- y = y.reshape(len(y), -1) # flatten\n \n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n@@ -37,16 +41,17 @@\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n- pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))\n+ pred = xp.where(mask, self.ignore_label,\n+ y.argmax(axis=1).reshape(t.shape))\n count = (pred == t).sum() - ignore_cnt\n- total = len(t) - ignore_cnt\n+ total = t.size - ignore_cnt\n \n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n- pred = y.argmax(axis=1)\n+ pred = y.argmax(axis=1).reshape(t.shape)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n", "issue": "Inconsistency of type checking between SoftmaxCrossEntropy and Accuracy\nWe often evaluate training procedure with both `softmax_cross_entropy` and `accuracy`, as default `chainer.links.Classifier` does. But there are cases `softmax_cross_entorpy(x, t)` is accepted but `accuracy(x, t)` raises error because of its type checking (and vise versa). Replicated softmax is one of a such examples (e.g. `x.shape = (B, C, N)` and `t.shape = (B, N)`). 
We need to reshape them manually in such a case.\n\nIt is convenient for users that shape checking of the two be more consistent.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Accuracy(function.Function):\n\n def __init__(self, ignore_label=None):\n self.ignore_label = ignore_label\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim >= 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n t_type.shape[0] == x_type.shape[0],\n )\n for i in range(2, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n y = y.reshape(len(y), -1) # flatten\n\n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n ignore_cnt = mask.sum()\n\n # will always be true when the true label is ignore_label\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))\n count = (pred == t).sum() - ignore_cnt\n total = len(t) - ignore_cnt\n\n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n pred = y.argmax(axis=1)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n\n\ndef accuracy(y, t, ignore_label=None):\n \"\"\"Computes muticlass classification accuracy of the minibatch.\n\n Args:\n y (Variable): Variable holding a matrix whose (i, j)-th element\n indicates the score of the class j at the i-th example.\n t (Variable): Variable holding an int32 vector of ground truth labels.\n ignore_label (int or None): Skip calculating accuracy\n if the ture label is ``ignore_label``.\n\n Returns:\n Variable: A variable holding a scalar array of the accuracy.\n\n .. 
note:: This function is non-differentiable.\n\n \"\"\"\n return Accuracy(ignore_label=ignore_label)(y, t)\n", "path": "chainer/functions/evaluation/accuracy.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Accuracy(function.Function):\n\n def __init__(self, ignore_label=None):\n self.ignore_label = ignore_label\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n t_type.dtype == numpy.int32\n )\n\n t_ndim = t_type.ndim.eval()\n type_check.expect(\n x_type.ndim >= t_type.ndim,\n x_type.shape[0] == t_type.shape[0],\n x_type.shape[2: t_ndim + 1] == t_type.shape[1:]\n )\n for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n\n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n ignore_cnt = mask.sum()\n\n # will always be true when the true label is ignore_label\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n pred = xp.where(mask, self.ignore_label,\n y.argmax(axis=1).reshape(t.shape))\n count = (pred == t).sum() - ignore_cnt\n total = t.size - ignore_cnt\n\n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n pred = y.argmax(axis=1).reshape(t.shape)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n\n\ndef accuracy(y, t, ignore_label=None):\n \"\"\"Computes muticlass classification accuracy of the minibatch.\n\n Args:\n y (Variable): Variable holding a matrix whose (i, j)-th element\n indicates the score of the class j at the i-th example.\n t (Variable): Variable holding an int32 vector of ground truth labels.\n ignore_label (int or None): Skip calculating accuracy\n if the ture label is ``ignore_label``.\n\n Returns:\n Variable: A variable holding a scalar array of the accuracy.\n\n .. note:: This function is non-differentiable.\n\n \"\"\"\n return Accuracy(ignore_label=ignore_label)(y, t)\n", "path": "chainer/functions/evaluation/accuracy.py"}]}
| 1,073 | 543 |
gh_patches_debug_15087
|
rasdani/github-patches
|
git_diff
|
kedro-org__kedro-20
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User home directory is not expanded for TextLocalDataSet
## Description
User home directory `~` is not automatically expanded for `TextLocalDataSet`, but it is automatically expanded for `ParquetLocalDataSet` and `CSVLocalDataSet `.
## Context
Trying to specify file paths relative to user home directory to simplify interoperability and handoff of Kedro pipelines between teammates - instead of manually replacing hardcoded absolute paths on each machine or user, each user will automatically recreate same directory structure relative to his/her home directory.
## Steps to Reproduce
```
from kedro.io import TextLocalDataSet
import os
string_to_write = "This will go in a file."
data_set = TextLocalDataSet(filepath="~/code/tmp/new_documentation.md")
data_set.save(string_to_write)
os.path.abspath(data_set._filepath)
```
## Expected Result
`~` should be replaced by user home directory and file should be saved relative to it in subdirectory `code/tmp/`.
## Actual Result
Directory `~/code/tmp/` is created in current working directory.
## Your Environment
Include as many relevant details about the environment in which you experienced the bug:
* Kedro version used (`pip show kedro` or `kedro -V`): v0.14.2
* Python version used (`python -V`): Python 3.6.8 :: Anaconda, Inc.
* Operating system and version: MacOS Mojave 10.14.3
## Checklist
Include labels so that we can categorise your issue:
- [ ] Add a "Component" label to the issue
- [ ] Add a "Priority" label to the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/io/text_local.py`
Content:
```
1 # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
11 # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
12 # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
13 # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
14 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
15 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 #
17 # The QuantumBlack Visual Analytics Limited (“QuantumBlack”) name and logo
18 # (either separately or in combination, “QuantumBlack Trademarks”) are
19 # trademarks of QuantumBlack. The License does not grant you any right or
20 # license to the QuantumBlack Trademarks. You may not use the QuantumBlack
21 # Trademarks or any confusingly similar mark as a trademark for your product,
22 # or use the QuantumBlack Trademarks in any other manner that might cause
23 # confusion in the marketplace, including but not limited to in advertising,
24 # on websites, or on software.
25 #
26 # See the License for the specific language governing permissions and
27 # limitations under the License.
28 """``TextLocalDataSet`` loads and saves data to a local text file. The data is
29 accessed text data using the python open function.
30 """
31 from pathlib import Path
32 from typing import Any, Dict
33
34 from kedro.io import AbstractDataSet
35 from kedro.io.core import DataSetError, FilepathVersionMixIn, Version
36
37
38 class TextLocalDataSet(AbstractDataSet, FilepathVersionMixIn):
39 """``TextLocalDataSet`` loads and saves unstructured text files.
40
41 Example:
42 ::
43
44 >>> from kedro.io import TextLocalDataSet
45 >>>
46 >>> string_to_write = "This will go in a file."
47 >>>
48 >>> data_set = TextLocalDataSet(filepath="docs/new_documentation.md")
49 >>> data_set.save(string_to_write)
50 >>> reloaded = data_set.load()
51 """
52
53 def _describe(self) -> Dict[str, Any]:
54 return dict(
55 filepath=self._filepath,
56 load_args=self._load_args,
57 save_args=self._save_args,
58 version=self._version,
59 )
60
61 def __init__(
62 self,
63 filepath: str,
64 load_args: Dict[str, Any] = None,
65 save_args: Dict[str, Any] = None,
66 version: Version = None,
67 ) -> None:
68 """Creates a new instance of ``TextFile``.
69
70 Args:
71 filepath: path to a text file.
72 load_args: Load arguments should be specified in accordance with
73 the built in open function. This can be found at
74 https://docs.python.org/3/library/functions.html#open
75 save_args: Save arguments should be specified in accordance with
76 the built in open function. This can be found at
77 https://docs.python.org/3/library/functions.html#open
78 version: If specified, should be an instance of
79 ``kedro.io.core.Version``. If its ``load`` attribute is
80 None, the latest version will be loaded. If its ``save``
81 attribute is None, save version will be autogenerated.
82 """
83 default_save_args = {"mode": "w"}
84 default_load_args = {"mode": "r"}
85
86 self._filepath = filepath
87 self._load_args = (
88 {**default_load_args, **load_args}
89 if load_args is not None
90 else default_load_args
91 )
92 self._save_args = (
93 {**default_save_args, **save_args}
94 if save_args is not None
95 else default_save_args
96 )
97 self._version = version
98
99 def _load(self) -> str:
100 load_path = self._get_load_path(self._filepath, self._version)
101 with open(load_path, **self._load_args) as _file:
102 return _file.read()
103
104 def _save(self, data: str) -> None:
105 save_path = Path(self._get_save_path(self._filepath, self._version))
106 save_path.parent.mkdir(parents=True, exist_ok=True)
107 with open(str(save_path), **self._save_args) as _file:
108 _file.write(data)
109
110 load_path = Path(self._get_load_path(self._filepath, self._version))
111 self._check_paths_consistency(
112 str(load_path.absolute()), str(save_path.absolute())
113 )
114
115 def _exists(self) -> bool:
116 try:
117 path = self._get_load_path(self._filepath, self._version)
118 except DataSetError:
119 return False
120 return Path(path).is_file()
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kedro/io/text_local.py b/kedro/io/text_local.py
--- a/kedro/io/text_local.py
+++ b/kedro/io/text_local.py
@@ -28,6 +28,7 @@
"""``TextLocalDataSet`` loads and saves data to a local text file. The data is
accessed text data using the python open function.
"""
+import os
from pathlib import Path
from typing import Any, Dict
@@ -83,7 +84,7 @@
default_save_args = {"mode": "w"}
default_load_args = {"mode": "r"}
- self._filepath = filepath
+ self._filepath = os.path.expanduser(filepath)
self._load_args = (
{**default_load_args, **load_args}
if load_args is not None
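A stripped-down sketch of the behaviour the one-line fix produces: expand `~` once in the constructor so every later save or load works against the user's home directory. The class below is illustrative only and far smaller than the real `TextLocalDataSet`.

```python
# Minimal illustration (not the Kedro class): expand "~" when the path is stored.
import os

class TinyTextDataSet:
    def __init__(self, filepath):
        self._filepath = os.path.expanduser(filepath)  # the essence of the patch

    def save(self, data):
        os.makedirs(os.path.dirname(self._filepath), exist_ok=True)
        with open(self._filepath, "w") as f:
            f.write(data)

ds = TinyTextDataSet("~/code/tmp/new_documentation.md")
print(os.path.isabs(ds._filepath))  # True: the file now lives under $HOME
```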
|
{"golden_diff": "diff --git a/kedro/io/text_local.py b/kedro/io/text_local.py\n--- a/kedro/io/text_local.py\n+++ b/kedro/io/text_local.py\n@@ -28,6 +28,7 @@\n \"\"\"``TextLocalDataSet`` loads and saves data to a local text file. The data is\n accessed text data using the python open function.\n \"\"\"\n+import os\n from pathlib import Path\n from typing import Any, Dict\n \n@@ -83,7 +84,7 @@\n default_save_args = {\"mode\": \"w\"}\n default_load_args = {\"mode\": \"r\"}\n \n- self._filepath = filepath\n+ self._filepath = os.path.expanduser(filepath)\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n", "issue": "User home directory is not expanded for TextLocalDataSet\n## Description\r\nUser home directory `~` is not automatically expanded for `TextLocalDataSet`, but it is automatically expanded for `ParquetLocalDataSet` and `CSVLocalDataSet `.\r\n\r\n## Context\r\nTrying to specify file paths relative to user home directory to simplify interoperability and handoff of Kedro pipelines between teammates - instead of manually replacing hardcoded absolute paths on each machine or user, each user will automatically recreate same directory structure relative to his/her home directory. \r\n\r\n## Steps to Reproduce\r\n```\r\nfrom kedro.io import TextLocalDataSet\r\nimport os\r\n\r\n\r\nstring_to_write = \"This will go in a file.\"\r\n\r\ndata_set = TextLocalDataSet(filepath=\"~/code/tmp/new_documentation.md\")\r\ndata_set.save(string_to_write)\r\n\r\nos.path.abspath(data_set._filepath)\r\n```\r\n\r\n## Expected Result\r\n`~` should be replaced by user home directory and file should be saved relative to it in subdirectory `code/tmp/`.\r\n\r\n## Actual Result\r\nDirectory `~/code/tmp/` is created in current working directory.\r\n\r\n\r\n## Your Environment\r\nInclude as many relevant details about the environment in which you experienced the bug:\r\n\r\n* Kedro version used (`pip show kedro` or `kedro -V`): v0.14.2\r\n* Python version used (`python -V`): Python 3.6.8 :: Anaconda, Inc.\r\n* Operating system and version: MacOS Mojave 10.14.3\r\n\r\n## Checklist\r\nInclude labels so that we can categorise your issue:\r\n\r\n- [ ] Add a \"Component\" label to the issue\r\n- [ ] Add a \"Priority\" label to the issue\r\n\n", "before_files": [{"content": "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\u201cQuantumBlack\u201d) name and logo\n# (either separately or in combination, \u201cQuantumBlack Trademarks\u201d) are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. 
You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"``TextLocalDataSet`` loads and saves data to a local text file. The data is\naccessed text data using the python open function.\n\"\"\"\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom kedro.io import AbstractDataSet\nfrom kedro.io.core import DataSetError, FilepathVersionMixIn, Version\n\n\nclass TextLocalDataSet(AbstractDataSet, FilepathVersionMixIn):\n \"\"\"``TextLocalDataSet`` loads and saves unstructured text files.\n\n Example:\n ::\n\n >>> from kedro.io import TextLocalDataSet\n >>>\n >>> string_to_write = \"This will go in a file.\"\n >>>\n >>> data_set = TextLocalDataSet(filepath=\"docs/new_documentation.md\")\n >>> data_set.save(string_to_write)\n >>> reloaded = data_set.load()\n \"\"\"\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def __init__(\n self,\n filepath: str,\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n ) -> None:\n \"\"\"Creates a new instance of ``TextFile``.\n\n Args:\n filepath: path to a text file.\n load_args: Load arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n save_args: Save arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. 
If its ``save``\n attribute is None, save version will be autogenerated.\n \"\"\"\n default_save_args = {\"mode\": \"w\"}\n default_load_args = {\"mode\": \"r\"}\n\n self._filepath = filepath\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n else default_load_args\n )\n self._save_args = (\n {**default_save_args, **save_args}\n if save_args is not None\n else default_save_args\n )\n self._version = version\n\n def _load(self) -> str:\n load_path = self._get_load_path(self._filepath, self._version)\n with open(load_path, **self._load_args) as _file:\n return _file.read()\n\n def _save(self, data: str) -> None:\n save_path = Path(self._get_save_path(self._filepath, self._version))\n save_path.parent.mkdir(parents=True, exist_ok=True)\n with open(str(save_path), **self._save_args) as _file:\n _file.write(data)\n\n load_path = Path(self._get_load_path(self._filepath, self._version))\n self._check_paths_consistency(\n str(load_path.absolute()), str(save_path.absolute())\n )\n\n def _exists(self) -> bool:\n try:\n path = self._get_load_path(self._filepath, self._version)\n except DataSetError:\n return False\n return Path(path).is_file()\n", "path": "kedro/io/text_local.py"}], "after_files": [{"content": "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\u201cQuantumBlack\u201d) name and logo\n# (either separately or in combination, \u201cQuantumBlack Trademarks\u201d) are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"``TextLocalDataSet`` loads and saves data to a local text file. 
The data is\naccessed text data using the python open function.\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom kedro.io import AbstractDataSet\nfrom kedro.io.core import DataSetError, FilepathVersionMixIn, Version\n\n\nclass TextLocalDataSet(AbstractDataSet, FilepathVersionMixIn):\n \"\"\"``TextLocalDataSet`` loads and saves unstructured text files.\n\n Example:\n ::\n\n >>> from kedro.io import TextLocalDataSet\n >>>\n >>> string_to_write = \"This will go in a file.\"\n >>>\n >>> data_set = TextLocalDataSet(filepath=\"docs/new_documentation.md\")\n >>> data_set.save(string_to_write)\n >>> reloaded = data_set.load()\n \"\"\"\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def __init__(\n self,\n filepath: str,\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n ) -> None:\n \"\"\"Creates a new instance of ``TextFile``.\n\n Args:\n filepath: path to a text file.\n load_args: Load arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n save_args: Save arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n \"\"\"\n default_save_args = {\"mode\": \"w\"}\n default_load_args = {\"mode\": \"r\"}\n\n self._filepath = os.path.expanduser(filepath)\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n else default_load_args\n )\n self._save_args = (\n {**default_save_args, **save_args}\n if save_args is not None\n else default_save_args\n )\n self._version = version\n\n def _load(self) -> str:\n load_path = self._get_load_path(self._filepath, self._version)\n with open(load_path, **self._load_args) as _file:\n return _file.read()\n\n def _save(self, data: str) -> None:\n save_path = Path(self._get_save_path(self._filepath, self._version))\n save_path.parent.mkdir(parents=True, exist_ok=True)\n with open(str(save_path), **self._save_args) as _file:\n _file.write(data)\n\n load_path = Path(self._get_load_path(self._filepath, self._version))\n self._check_paths_consistency(\n str(load_path.absolute()), str(save_path.absolute())\n )\n\n def _exists(self) -> bool:\n try:\n path = self._get_load_path(self._filepath, self._version)\n except DataSetError:\n return False\n return Path(path).is_file()\n", "path": "kedro/io/text_local.py"}]}
| 1,927 | 179 |
gh_patches_debug_16874
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6423
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use of access tokens as query parameters in the GitHub OAuth API is deprecated
**Summary**
We use GitHub's OAuth API as one of MDN's sign-up/sign-in OAuth providers. We're starting to receive emails from GitHub that using the https://api.github.com/user API with the access token as a query parameter has been deprecated and that the `Authorization` header should be used instead. This occurs within `GitHubOAuth2Adapter.complete_login` method provided by `django-allauth`, but `django-allauth` has not yet fixed this (although a PR has been submitted that does -- see https://github.com/pennersr/django-allauth/pull/2458). Even if `django-allauth` fixes the issue, it wouldn't help in our case since we override this method (https://github.com/mdn/kuma/blob/266bd9d8ebf24c950037a1965b1967022fca233f/kuma/users/providers/github/views.py#L20). We need to update our overridden method to pass the token via the `Authorization` header rather than via a query parameter.
**Rationale**
We no longer have to concern ourselves with using a deprecated approach.
**Audience**
All users who sign-up/sign-in to MDN via GitHub.
--- END ISSUE ---
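For illustration only, here is a minimal sketch of the header-based request pattern the issue calls for, using the plain `requests` package rather than kuma's retry-session helper; the function name and the endpoint constant are assumptions, not kuma code:

```python
import requests

GITHUB_PROFILE_URL = "https://api.github.com/user"  # endpoint named in the issue


def fetch_github_profile(access_token):
    # Send the OAuth token in the Authorization header instead of the
    # deprecated ?access_token= query parameter.
    headers = {"Authorization": "token {}".format(access_token)}
    response = requests.get(GITHUB_PROFILE_URL, headers=headers)
    response.raise_for_status()
    return response.json()
```

The same header dictionary can be reused for any follow-up call, such as fetching the user's email addresses.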
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/providers/github/views.py`
Content:
```
1 from allauth.account.utils import get_next_redirect_url
2 from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
3 from allauth.socialaccount.providers.oauth2.views import (OAuth2CallbackView,
4 OAuth2LoginView)
5
6 from kuma.core.decorators import redirect_in_maintenance_mode
7 from kuma.core.urlresolvers import reverse
8 from kuma.core.utils import requests_retry_session
9
10
11 class KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter):
12 """
13 A custom GitHub OAuth adapter to be used for fetching the list
14 of private email addresses stored for the given user at GitHub.
15
16 We store those email addresses in the extra data of each account.
17 """
18 email_url = 'https://api.github.com/user/emails'
19
20 def complete_login(self, request, app, token, **kwargs):
21 session = requests_retry_session()
22 params = {'access_token': token.token}
23 profile_data = session.get(self.profile_url, params=params)
24 profile_data.raise_for_status()
25 extra_data = profile_data.json()
26 email_data = session.get(self.email_url, params=params)
27 email_data.raise_for_status()
28 extra_data['email_addresses'] = email_data.json()
29 return self.get_provider().sociallogin_from_response(request,
30 extra_data)
31
32
33 class KumaOAuth2LoginView(OAuth2LoginView):
34
35 def dispatch(self, request):
36 next_url = (get_next_redirect_url(request) or
37 reverse('users.my_edit_page'))
38 request.session['sociallogin_next_url'] = next_url
39 request.session.modified = True
40 return super(KumaOAuth2LoginView, self).dispatch(request)
41
42
43 oauth2_login = redirect_in_maintenance_mode(
44 KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter)
45 )
46 oauth2_callback = redirect_in_maintenance_mode(
47 OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter)
48 )
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/users/providers/github/views.py b/kuma/users/providers/github/views.py
--- a/kuma/users/providers/github/views.py
+++ b/kuma/users/providers/github/views.py
@@ -19,11 +19,11 @@
def complete_login(self, request, app, token, **kwargs):
session = requests_retry_session()
- params = {'access_token': token.token}
- profile_data = session.get(self.profile_url, params=params)
+ headers = {'Authorization': f'token {token.token}'}
+ profile_data = session.get(self.profile_url, headers=headers)
profile_data.raise_for_status()
extra_data = profile_data.json()
- email_data = session.get(self.email_url, params=params)
+ email_data = session.get(self.email_url, headers=headers)
email_data.raise_for_status()
extra_data['email_addresses'] = email_data.json()
return self.get_provider().sociallogin_from_response(request,
|
{"golden_diff": "diff --git a/kuma/users/providers/github/views.py b/kuma/users/providers/github/views.py\n--- a/kuma/users/providers/github/views.py\n+++ b/kuma/users/providers/github/views.py\n@@ -19,11 +19,11 @@\n \n def complete_login(self, request, app, token, **kwargs):\n session = requests_retry_session()\n- params = {'access_token': token.token}\n- profile_data = session.get(self.profile_url, params=params)\n+ headers = {'Authorization': f'token {token.token}'}\n+ profile_data = session.get(self.profile_url, headers=headers)\n profile_data.raise_for_status()\n extra_data = profile_data.json()\n- email_data = session.get(self.email_url, params=params)\n+ email_data = session.get(self.email_url, headers=headers)\n email_data.raise_for_status()\n extra_data['email_addresses'] = email_data.json()\n return self.get_provider().sociallogin_from_response(request,\n", "issue": "Use of access tokens as query parameters in the GitHub OAuth API is deprecated\n**Summary**\r\nWe use GitHub's OAuth API as one of MDN's sign-up/sign-in OAuth providers. We're starting to receive emails from GitHub that using the https://api.github.com/user API with the access token as a query parameter has been deprecated and that the `Authorization` header should be used instead. This occurs within `GitHubOAuth2Adapter.complete_login` method provided by `django-allauth`, but `django-allauth` has not yet fixed this (although a PR has been submitted that does -- see https://github.com/pennersr/django-allauth/pull/2458). Even if `django-allauth` fixes the issue, it wouldn't help in our case since we override this method (https://github.com/mdn/kuma/blob/266bd9d8ebf24c950037a1965b1967022fca233f/kuma/users/providers/github/views.py#L20). We need to update our overridden method to pass the token via the `Authorization` header rather than via a query parameter.\r\n\r\n**Rationale**\r\nWe no longer have to concern ourselves with using a deprecated approach.\r\n\r\n**Audience**\r\nAll users who sign-up/sign-in to MDN via GitHub.\r\n\n", "before_files": [{"content": "from allauth.account.utils import get_next_redirect_url\nfrom allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.views import (OAuth2CallbackView,\n OAuth2LoginView)\n\nfrom kuma.core.decorators import redirect_in_maintenance_mode\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session\n\n\nclass KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter):\n \"\"\"\n A custom GitHub OAuth adapter to be used for fetching the list\n of private email addresses stored for the given user at GitHub.\n\n We store those email addresses in the extra data of each account.\n \"\"\"\n email_url = 'https://api.github.com/user/emails'\n\n def complete_login(self, request, app, token, **kwargs):\n session = requests_retry_session()\n params = {'access_token': token.token}\n profile_data = session.get(self.profile_url, params=params)\n profile_data.raise_for_status()\n extra_data = profile_data.json()\n email_data = session.get(self.email_url, params=params)\n email_data.raise_for_status()\n extra_data['email_addresses'] = email_data.json()\n return self.get_provider().sociallogin_from_response(request,\n extra_data)\n\n\nclass KumaOAuth2LoginView(OAuth2LoginView):\n\n def dispatch(self, request):\n next_url = (get_next_redirect_url(request) or\n reverse('users.my_edit_page'))\n request.session['sociallogin_next_url'] = next_url\n request.session.modified = True\n return 
super(KumaOAuth2LoginView, self).dispatch(request)\n\n\noauth2_login = redirect_in_maintenance_mode(\n KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter)\n)\noauth2_callback = redirect_in_maintenance_mode(\n OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter)\n)\n", "path": "kuma/users/providers/github/views.py"}], "after_files": [{"content": "from allauth.account.utils import get_next_redirect_url\nfrom allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.views import (OAuth2CallbackView,\n OAuth2LoginView)\n\nfrom kuma.core.decorators import redirect_in_maintenance_mode\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session\n\n\nclass KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter):\n \"\"\"\n A custom GitHub OAuth adapter to be used for fetching the list\n of private email addresses stored for the given user at GitHub.\n\n We store those email addresses in the extra data of each account.\n \"\"\"\n email_url = 'https://api.github.com/user/emails'\n\n def complete_login(self, request, app, token, **kwargs):\n session = requests_retry_session()\n headers = {'Authorization': f'token {token.token}'}\n profile_data = session.get(self.profile_url, headers=headers)\n profile_data.raise_for_status()\n extra_data = profile_data.json()\n email_data = session.get(self.email_url, headers=headers)\n email_data.raise_for_status()\n extra_data['email_addresses'] = email_data.json()\n return self.get_provider().sociallogin_from_response(request,\n extra_data)\n\n\nclass KumaOAuth2LoginView(OAuth2LoginView):\n\n def dispatch(self, request):\n next_url = (get_next_redirect_url(request) or\n reverse('users.my_edit_page'))\n request.session['sociallogin_next_url'] = next_url\n request.session.modified = True\n return super(KumaOAuth2LoginView, self).dispatch(request)\n\n\noauth2_login = redirect_in_maintenance_mode(\n KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter)\n)\noauth2_callback = redirect_in_maintenance_mode(\n OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter)\n)\n", "path": "kuma/users/providers/github/views.py"}]}
| 1,030 | 205 |
gh_patches_debug_59499
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-274
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make test_tube optional
**Is your feature request related to a problem? Please describe.**
`test_tube` is only needed when using `TestTubeLogger`, thus it should not be an install requirement like `mlflow`.
--- END ISSUE ---
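As a rough sketch of the optional-dependency pattern this request implies (generic names, not the project's actual module layout), an import guard might look like this:

```python
try:
    import test_tube  # optional dependency, only needed for the TestTube-based logger
except ModuleNotFoundError:
    test_tube = None


def require_test_tube():
    # Fail lazily, at use time, instead of at package import time.
    if test_tube is None:
        raise ImportError(
            "The optional `test_tube` package is not installed; "
            "run `pip install test_tube` to enable this logger."
        )
```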
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/logging/__init__.py`
Content:
```
1 from .base import LightningLoggerBase, rank_zero_only
2 from .test_tube_logger import TestTubeLogger
3
4 try:
5 from .mlflow_logger import MLFlowLogger
6 except ModuleNotFoundError:
7 pass
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py
--- a/pytorch_lightning/logging/__init__.py
+++ b/pytorch_lightning/logging/__init__.py
@@ -1,6 +1,9 @@
from .base import LightningLoggerBase, rank_zero_only
-from .test_tube_logger import TestTubeLogger
+try:
+ from .test_tube_logger import TestTubeLogger
+except ModuleNotFoundError:
+ pass
try:
from .mlflow_logger import MLFlowLogger
except ModuleNotFoundError:
|
{"golden_diff": "diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py\n--- a/pytorch_lightning/logging/__init__.py\n+++ b/pytorch_lightning/logging/__init__.py\n@@ -1,6 +1,9 @@\n from .base import LightningLoggerBase, rank_zero_only\n-from .test_tube_logger import TestTubeLogger\n \n+try:\n+ from .test_tube_logger import TestTubeLogger\n+except ModuleNotFoundError:\n+ pass\n try:\n from .mlflow_logger import MLFlowLogger\n except ModuleNotFoundError:\n", "issue": "Make test_tube optional\n**Is your feature request related to a problem? Please describe.**\r\n`test_tube` is only needed when using `TestTubeLogger`, thus it should not be an install requirement like `mlflow`.\n", "before_files": [{"content": "from .base import LightningLoggerBase, rank_zero_only\nfrom .test_tube_logger import TestTubeLogger\n\ntry:\n from .mlflow_logger import MLFlowLogger\nexcept ModuleNotFoundError:\n pass\n", "path": "pytorch_lightning/logging/__init__.py"}], "after_files": [{"content": "from .base import LightningLoggerBase, rank_zero_only\n\ntry:\n from .test_tube_logger import TestTubeLogger\nexcept ModuleNotFoundError:\n pass\ntry:\n from .mlflow_logger import MLFlowLogger\nexcept ModuleNotFoundError:\n pass\n", "path": "pytorch_lightning/logging/__init__.py"}]}
| 363 | 123 |
gh_patches_debug_31852
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-63
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build filename parser utility function
It would be useful to have a function in the `utils.py` module that returned the individual elements of a given filename, for example:
```python
from jwql.utils.utils import parse_filename
filename_dict = parse_filename('jw94015001001_02102_00001_nrcb1_uncal.fits')
```
where `filename_dict` is:
```
{
'program_id' : '94015',
'observation' : '001',
'visit' : '001',
'visit_group' : '02',
'parallel_seq_id' : '1',
'activity' : '02',
'exposure_id' : '00001',
'detector' : 'nrcb1',
'suffix' : 'uncal'
}
```
--- END ISSUE ---
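One possible shape for such a parser, sketched here with a named-group regular expression; the pattern is derived only from the example filename and dictionary in the issue, not from the jwql codebase:

```python
import re

# Field widths inferred from the example 'jw94015001001_02102_00001_nrcb1_uncal.fits'
FILENAME_PATTERN = re.compile(
    r"jw"
    r"(?P<program_id>\d{5})"
    r"(?P<observation>\d{3})"
    r"(?P<visit>\d{3})"
    r"_(?P<visit_group>\d{2})"
    r"(?P<parallel_seq_id>\d)"
    r"(?P<activity>\d{2})"
    r"_(?P<exposure_id>\d+)"
    r"_(?P<detector>\w+)"
    r"_(?P<suffix>\w+)\.fits"
)


def parse_filename(filename):
    """Return the naming-convention fields of a JWST-style filename as a dict."""
    match = FILENAME_PATTERN.match(filename)
    if match is None:
        raise ValueError("Unrecognized filename: {}".format(filename))
    return match.groupdict()


print(parse_filename("jw94015001001_02102_00001_nrcb1_uncal.fits"))
```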
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/utils/utils.py`
Content:
```
1 """Various utility functions for the jwql project.
2
3 Authors
4 -------
5
6 Matthew Bourque
7
8 Use
9 ---
10
11 This module can be imported as such:
12
13 >>> import utils
14 settings = get_config()
15 """
16
17 import json
18
19
20 def get_config():
21 """Return a dictionary that holds the contents of the jwql config
22 file.
23
24 Returns
25 -------
26 settings : dict
27 A dictionary that holds the contents of the config file.
28 """
29
30 with open('config.json', 'r') as config_file:
31 settings = json.load(config_file)
32
33 return settings
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py
--- a/jwql/utils/utils.py
+++ b/jwql/utils/utils.py
@@ -3,7 +3,8 @@
Authors
-------
- Matthew Bourque
+ - Matthew Bourque
+ - Lauren Chambers
Use
---
@@ -12,9 +13,19 @@
>>> import utils
settings = get_config()
+
+References
+----------
+
+ Filename parser modifed from Joe Hunkeler:
+ https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff
"""
import json
+import os
+import re
+
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def get_config():
@@ -27,7 +38,50 @@
A dictionary that holds the contents of the config file.
"""
- with open('config.json', 'r') as config_file:
+ with open(os.path.join(__location__, 'config.json'), 'r') as config_file:
settings = json.load(config_file)
return settings
+
+
+def filename_parser(filename):
+ """Return a dictionary that contains the properties of a given
+ JWST file (e.g. program ID, visit number, detector, etc.)
+
+ Parameters
+ ----------
+ filename : str
+ Path or name of JWST file to parse
+
+ Returns
+ -------
+ filename_dict : dict
+ Collection of file properties
+
+ Raises
+ ------
+ ValueError
+ When the provided file does not follow naming conventions
+ """
+ filename = os.path.basename(filename)
+
+ elements = \
+ re.compile(r"[a-z]+"
+ "(?P<program_id>\d{5})"
+ "(?P<observation>\d{3})"
+ "(?P<visit>\d{3})"
+ "_(?P<visit_group>\d{2})"
+ "(?P<parallel_seq_id>\d{1})"
+ "(?P<activity>\d{2})"
+ "_(?P<exposure_id>\d+)"
+ "_(?P<detector>\w+)"
+ "_(?P<suffix>\w+).*")
+
+ jwst_file = elements.match(filename)
+
+ if jwst_file is not None:
+ filename_dict = jwst_file.groupdict()
+ else:
+ raise ValueError('Provided file {} does not follow JWST naming conventions (jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>_<detector>_<suffix>.fits)'.format(filename))
+
+ return filename_dict
|
{"golden_diff": "diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py\n--- a/jwql/utils/utils.py\n+++ b/jwql/utils/utils.py\n@@ -3,7 +3,8 @@\n Authors\n -------\n \n- Matthew Bourque\n+ - Matthew Bourque\n+ - Lauren Chambers\n \n Use\n ---\n@@ -12,9 +13,19 @@\n \n >>> import utils\n settings = get_config()\n+\n+References\n+----------\n+\n+ Filename parser modifed from Joe Hunkeler:\n+ https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff\n \"\"\"\n \n import json\n+import os\n+import re\n+\n+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n \n def get_config():\n@@ -27,7 +38,50 @@\n A dictionary that holds the contents of the config file.\n \"\"\"\n \n- with open('config.json', 'r') as config_file:\n+ with open(os.path.join(__location__, 'config.json'), 'r') as config_file:\n settings = json.load(config_file)\n \n return settings\n+\n+\n+def filename_parser(filename):\n+ \"\"\"Return a dictionary that contains the properties of a given\n+ JWST file (e.g. program ID, visit number, detector, etc.)\n+\n+ Parameters\n+ ----------\n+ filename : str\n+ Path or name of JWST file to parse\n+\n+ Returns\n+ -------\n+ filename_dict : dict\n+ Collection of file properties\n+\n+ Raises\n+ ------\n+ ValueError\n+ When the provided file does not follow naming conventions\n+ \"\"\"\n+ filename = os.path.basename(filename)\n+\n+ elements = \\\n+ re.compile(r\"[a-z]+\"\n+ \"(?P<program_id>\\d{5})\"\n+ \"(?P<observation>\\d{3})\"\n+ \"(?P<visit>\\d{3})\"\n+ \"_(?P<visit_group>\\d{2})\"\n+ \"(?P<parallel_seq_id>\\d{1})\"\n+ \"(?P<activity>\\d{2})\"\n+ \"_(?P<exposure_id>\\d+)\"\n+ \"_(?P<detector>\\w+)\"\n+ \"_(?P<suffix>\\w+).*\")\n+\n+ jwst_file = elements.match(filename)\n+\n+ if jwst_file is not None:\n+ filename_dict = jwst_file.groupdict()\n+ else:\n+ raise ValueError('Provided file {} does not follow JWST naming conventions (jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>_<detector>_<suffix>.fits)'.format(filename))\n+\n+ return filename_dict\n", "issue": "Build filename parser utility function\nIt would be useful to have a function in the `utils.py` module that returned the individual elements of a given filename, for example:\r\n\r\n```python\r\nfrom jwql.utils.utils import parse_filename\r\nfilename_dict = parse_filename('jw94015001001_02102_00001_nrcb1_uncal.fits')\r\n```\r\n\r\nwhere `filename_dict` is:\r\n\r\n```\r\n{\r\n 'program_id' : '94015',\r\n 'observation' : '001',\r\n 'visit' : '001',\r\n 'visit_group' : '02',\r\n 'parallel_seq_id' : '1',\r\n 'activity' : '02',\r\n 'exposure_id' : '00001',\r\n 'detector' : 'nrcb1',\r\n 'suffix' : 'uncal'\r\n}\r\n```\n", "before_files": [{"content": "\"\"\"Various utility functions for the jwql project.\n\nAuthors\n-------\n\n Matthew Bourque\n\nUse\n---\n\n This module can be imported as such:\n\n >>> import utils\n settings = get_config()\n\"\"\"\n\nimport json\n\n\ndef get_config():\n \"\"\"Return a dictionary that holds the contents of the jwql config\n file.\n\n Returns\n -------\n settings : dict\n A dictionary that holds the contents of the config file.\n \"\"\"\n\n with open('config.json', 'r') as config_file:\n settings = json.load(config_file)\n\n return settings\n", "path": "jwql/utils/utils.py"}], "after_files": [{"content": "\"\"\"Various utility functions for the jwql project.\n\nAuthors\n-------\n\n - Matthew Bourque\n - Lauren Chambers\n\nUse\n---\n\n This module can be imported as such:\n\n >>> import utils\n settings = get_config()\n\nReferences\n----------\n\n Filename parser modifed from Joe 
Hunkeler:\n https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff\n\"\"\"\n\nimport json\nimport os\nimport re\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\ndef get_config():\n \"\"\"Return a dictionary that holds the contents of the jwql config\n file.\n\n Returns\n -------\n settings : dict\n A dictionary that holds the contents of the config file.\n \"\"\"\n\n with open(os.path.join(__location__, 'config.json'), 'r') as config_file:\n settings = json.load(config_file)\n\n return settings\n\n\ndef filename_parser(filename):\n \"\"\"Return a dictionary that contains the properties of a given\n JWST file (e.g. program ID, visit number, detector, etc.)\n\n Parameters\n ----------\n filename : str\n Path or name of JWST file to parse\n\n Returns\n -------\n filename_dict : dict\n Collection of file properties\n\n Raises\n ------\n ValueError\n When the provided file does not follow naming conventions\n \"\"\"\n filename = os.path.basename(filename)\n\n elements = \\\n re.compile(r\"[a-z]+\"\n \"(?P<program_id>\\d{5})\"\n \"(?P<observation>\\d{3})\"\n \"(?P<visit>\\d{3})\"\n \"_(?P<visit_group>\\d{2})\"\n \"(?P<parallel_seq_id>\\d{1})\"\n \"(?P<activity>\\d{2})\"\n \"_(?P<exposure_id>\\d+)\"\n \"_(?P<detector>\\w+)\"\n \"_(?P<suffix>\\w+).*\")\n\n jwst_file = elements.match(filename)\n\n if jwst_file is not None:\n filename_dict = jwst_file.groupdict()\n else:\n raise ValueError('Provided file {} does not follow JWST naming conventions (jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>_<detector>_<suffix>.fits)'.format(filename))\n\n return filename_dict\n", "path": "jwql/utils/utils.py"}]}
| 646 | 626 |
gh_patches_debug_40427
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-4290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEATURE] Add chat_id in ChatMemberHandler to Filter Specific Chat(s)
### What kind of feature are you missing? Where do you notice a shortcoming of PTB?
ChatMemberHandler can include a `chat_id` param to filter only on specified chat(s).
### Describe the solution you'd like
It would be convenient to be able to do something like this:
``` python
application.add_handler(
ChatMemberHandler(member_callback_1, ChatMemberHandler.CHAT_MEMBER, chat_id=<chat_id_1>)
)
application.add_handler(
ChatMemberHandler(member_callback_2, ChatMemberHandler.CHAT_MEMBER, chat_id=<chat_id_2>)
)
```
### Describe alternatives you've considered
``` python
async def member_callback_1(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
if update.chat_member.chat.id != <chat_id_1>:
return
...
async def member_callback_2(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
if update.chat_member.chat.id != <chat_id_2>:
return
...
```
### Additional context
Let me know if there is an alternate / better way to achieve this.
--- END ISSUE ---
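A generic sketch of the chat-id gate such a feature would need, independent of the library's internal handler classes; the helper names and the frozenset normalisation are assumptions for illustration:

```python
def normalize_chat_ids(chat_id):
    """Accept a single id, a collection of ids, or None; return a frozenset or None."""
    if chat_id is None:
        return None
    if isinstance(chat_id, int):
        return frozenset({chat_id})
    return frozenset(chat_id)


def chat_allowed(update_chat_id, allowed_ids):
    """Pass the update through when no filter is set or its chat id is listed."""
    return allowed_ids is None or update_chat_id in allowed_ids
```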
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/ext/_handlers/chatmemberhandler.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2024
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the ChatMemberHandler class."""
20 from typing import Final, Optional, TypeVar
21
22 from telegram import Update
23 from telegram._utils.defaultvalue import DEFAULT_TRUE
24 from telegram._utils.types import DVType
25 from telegram.ext._handlers.basehandler import BaseHandler
26 from telegram.ext._utils.types import CCT, HandlerCallback
27
28 RT = TypeVar("RT")
29
30
31 class ChatMemberHandler(BaseHandler[Update, CCT]):
32 """Handler class to handle Telegram updates that contain a chat member update.
33
34 Warning:
35 When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom
36 attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.
37
38 Examples:
39 :any:`Chat Member Bot <examples.chatmemberbot>`
40
41 .. versionadded:: 13.4
42
43 Args:
44 callback (:term:`coroutine function`): The callback function for this handler. Will be
45 called when :meth:`check_update` has determined that an update should be processed by
46 this handler. Callback signature::
47
48 async def callback(update: Update, context: CallbackContext)
49
50 The return value of the callback is usually ignored except for the special case of
51 :class:`telegram.ext.ConversationHandler`.
52 chat_member_types (:obj:`int`, optional): Pass one of :attr:`MY_CHAT_MEMBER`,
53 :attr:`CHAT_MEMBER` or :attr:`ANY_CHAT_MEMBER` to specify if this handler should handle
54 only updates with :attr:`telegram.Update.my_chat_member`,
55 :attr:`telegram.Update.chat_member` or both. Defaults to :attr:`MY_CHAT_MEMBER`.
56 block (:obj:`bool`, optional): Determines whether the return value of the callback should
57 be awaited before processing the next handler in
58 :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.
59
60 .. seealso:: :wiki:`Concurrency`
61
62 Attributes:
63 callback (:term:`coroutine function`): The callback function for this handler.
64 chat_member_types (:obj:`int`): Optional. Specifies if this handler should handle
65 only updates with :attr:`telegram.Update.my_chat_member`,
66 :attr:`telegram.Update.chat_member` or both.
67 block (:obj:`bool`): Determines whether the return value of the callback should be
68 awaited before processing the next handler in
69 :meth:`telegram.ext.Application.process_update`.
70
71 """
72
73 __slots__ = ("chat_member_types",)
74 MY_CHAT_MEMBER: Final[int] = -1
75 """:obj:`int`: Used as a constant to handle only :attr:`telegram.Update.my_chat_member`."""
76 CHAT_MEMBER: Final[int] = 0
77 """:obj:`int`: Used as a constant to handle only :attr:`telegram.Update.chat_member`."""
78 ANY_CHAT_MEMBER: Final[int] = 1
79 """:obj:`int`: Used as a constant to handle both :attr:`telegram.Update.my_chat_member`
80 and :attr:`telegram.Update.chat_member`."""
81
82 def __init__(
83 self,
84 callback: HandlerCallback[Update, CCT, RT],
85 chat_member_types: int = MY_CHAT_MEMBER,
86 block: DVType[bool] = DEFAULT_TRUE,
87 ):
88 super().__init__(callback, block=block)
89
90 self.chat_member_types: Optional[int] = chat_member_types
91
92 def check_update(self, update: object) -> bool:
93 """Determines whether an update should be passed to this handler's :attr:`callback`.
94
95 Args:
96 update (:class:`telegram.Update` | :obj:`object`): Incoming update.
97
98 Returns:
99 :obj:`bool`
100
101 """
102 if isinstance(update, Update):
103 if not (update.my_chat_member or update.chat_member):
104 return False
105 if self.chat_member_types == self.ANY_CHAT_MEMBER:
106 return True
107 if self.chat_member_types == self.CHAT_MEMBER:
108 return bool(update.chat_member)
109 return bool(update.my_chat_member)
110 return False
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram/ext/_handlers/chatmemberhandler.py b/telegram/ext/_handlers/chatmemberhandler.py
--- a/telegram/ext/_handlers/chatmemberhandler.py
+++ b/telegram/ext/_handlers/chatmemberhandler.py
@@ -21,8 +21,9 @@
from telegram import Update
from telegram._utils.defaultvalue import DEFAULT_TRUE
-from telegram._utils.types import DVType
+from telegram._utils.types import SCT, DVType
from telegram.ext._handlers.basehandler import BaseHandler
+from telegram.ext._utils._update_parsing import parse_chat_id
from telegram.ext._utils.types import CCT, HandlerCallback
RT = TypeVar("RT")
@@ -58,6 +59,9 @@
:meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.
.. seealso:: :wiki:`Concurrency`
+ chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters chat member updates from
+ specified chat ID(s) only.
+ .. versionadded:: NEXT.VERSION
Attributes:
callback (:term:`coroutine function`): The callback function for this handler.
@@ -70,7 +74,10 @@
"""
- __slots__ = ("chat_member_types",)
+ __slots__ = (
+ "_chat_ids",
+ "chat_member_types",
+ )
MY_CHAT_MEMBER: Final[int] = -1
""":obj:`int`: Used as a constant to handle only :attr:`telegram.Update.my_chat_member`."""
CHAT_MEMBER: Final[int] = 0
@@ -84,10 +91,12 @@
callback: HandlerCallback[Update, CCT, RT],
chat_member_types: int = MY_CHAT_MEMBER,
block: DVType[bool] = DEFAULT_TRUE,
+ chat_id: Optional[SCT[int]] = None,
):
super().__init__(callback, block=block)
self.chat_member_types: Optional[int] = chat_member_types
+ self._chat_ids = parse_chat_id(chat_id)
def check_update(self, update: object) -> bool:
"""Determines whether an update should be passed to this handler's :attr:`callback`.
@@ -99,12 +108,18 @@
:obj:`bool`
"""
- if isinstance(update, Update):
- if not (update.my_chat_member or update.chat_member):
- return False
- if self.chat_member_types == self.ANY_CHAT_MEMBER:
- return True
- if self.chat_member_types == self.CHAT_MEMBER:
- return bool(update.chat_member)
- return bool(update.my_chat_member)
- return False
+ if not isinstance(update, Update):
+ return False
+ if not (update.my_chat_member or update.chat_member):
+ return False
+ if (
+ self._chat_ids
+ and update.effective_chat
+ and update.effective_chat.id not in self._chat_ids
+ ):
+ return False
+ if self.chat_member_types == self.ANY_CHAT_MEMBER:
+ return True
+ if self.chat_member_types == self.CHAT_MEMBER:
+ return bool(update.chat_member)
+ return bool(update.my_chat_member)
|
{"golden_diff": "diff --git a/telegram/ext/_handlers/chatmemberhandler.py b/telegram/ext/_handlers/chatmemberhandler.py\n--- a/telegram/ext/_handlers/chatmemberhandler.py\n+++ b/telegram/ext/_handlers/chatmemberhandler.py\n@@ -21,8 +21,9 @@\n \n from telegram import Update\n from telegram._utils.defaultvalue import DEFAULT_TRUE\n-from telegram._utils.types import DVType\n+from telegram._utils.types import SCT, DVType\n from telegram.ext._handlers.basehandler import BaseHandler\n+from telegram.ext._utils._update_parsing import parse_chat_id\n from telegram.ext._utils.types import CCT, HandlerCallback\n \n RT = TypeVar(\"RT\")\n@@ -58,6 +59,9 @@\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n \n .. seealso:: :wiki:`Concurrency`\n+ chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters chat member updates from\n+ specified chat ID(s) only.\n+ .. versionadded:: NEXT.VERSION\n \n Attributes:\n callback (:term:`coroutine function`): The callback function for this handler.\n@@ -70,7 +74,10 @@\n \n \"\"\"\n \n- __slots__ = (\"chat_member_types\",)\n+ __slots__ = (\n+ \"_chat_ids\",\n+ \"chat_member_types\",\n+ )\n MY_CHAT_MEMBER: Final[int] = -1\n \"\"\":obj:`int`: Used as a constant to handle only :attr:`telegram.Update.my_chat_member`.\"\"\"\n CHAT_MEMBER: Final[int] = 0\n@@ -84,10 +91,12 @@\n callback: HandlerCallback[Update, CCT, RT],\n chat_member_types: int = MY_CHAT_MEMBER,\n block: DVType[bool] = DEFAULT_TRUE,\n+ chat_id: Optional[SCT[int]] = None,\n ):\n super().__init__(callback, block=block)\n \n self.chat_member_types: Optional[int] = chat_member_types\n+ self._chat_ids = parse_chat_id(chat_id)\n \n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n@@ -99,12 +108,18 @@\n :obj:`bool`\n \n \"\"\"\n- if isinstance(update, Update):\n- if not (update.my_chat_member or update.chat_member):\n- return False\n- if self.chat_member_types == self.ANY_CHAT_MEMBER:\n- return True\n- if self.chat_member_types == self.CHAT_MEMBER:\n- return bool(update.chat_member)\n- return bool(update.my_chat_member)\n- return False\n+ if not isinstance(update, Update):\n+ return False\n+ if not (update.my_chat_member or update.chat_member):\n+ return False\n+ if (\n+ self._chat_ids\n+ and update.effective_chat\n+ and update.effective_chat.id not in self._chat_ids\n+ ):\n+ return False\n+ if self.chat_member_types == self.ANY_CHAT_MEMBER:\n+ return True\n+ if self.chat_member_types == self.CHAT_MEMBER:\n+ return bool(update.chat_member)\n+ return bool(update.my_chat_member)\n", "issue": "[FEATURE] Add chat_id in ChatMemberHandler to Filter Specific Chat(s)\n### What kind of feature are you missing? 
Where do you notice a shortcoming of PTB?\n\nChatMemberHandler can include a `chat_id` param to filter only on specified chat(s).\n\n### Describe the solution you'd like\n\nIt would be convenient to be able to do something like this:\r\n\r\n``` python\r\napplication.add_handler(\r\n ChatMemberHandler(member_callback_1, ChatMemberHandler.CHAT_MEMBER, chat_id=<chat_id_1>)\r\n)\r\n\r\napplication.add_handler(\r\n ChatMemberHandler(member_callback_2, ChatMemberHandler.CHAT_MEMBER, chat_id=<chat_id_2>)\r\n)\r\n```\n\n### Describe alternatives you've considered\n\n``` python\r\nasync def member_callback_1(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\r\n if update.chat_member.chat.id != <chat_id_1>:\r\n return\r\n ...\r\n\r\nasync def member_callback_2(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\r\n if update.chat_member.chat.id != <chat_id_2>:\r\n return\r\n ...\r\n```\n\n### Additional context\n\nLet me know if there is an alternate / better way to achieve this. \n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2024\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the ChatMemberHandler class.\"\"\"\nfrom typing import Final, Optional, TypeVar\n\nfrom telegram import Update\nfrom telegram._utils.defaultvalue import DEFAULT_TRUE\nfrom telegram._utils.types import DVType\nfrom telegram.ext._handlers.basehandler import BaseHandler\nfrom telegram.ext._utils.types import CCT, HandlerCallback\n\nRT = TypeVar(\"RT\")\n\n\nclass ChatMemberHandler(BaseHandler[Update, CCT]):\n \"\"\"Handler class to handle Telegram updates that contain a chat member update.\n\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n\n Examples:\n :any:`Chat Member Bot <examples.chatmemberbot>`\n\n .. versionadded:: 13.4\n\n Args:\n callback (:term:`coroutine function`): The callback function for this handler. Will be\n called when :meth:`check_update` has determined that an update should be processed by\n this handler. Callback signature::\n\n async def callback(update: Update, context: CallbackContext)\n\n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n chat_member_types (:obj:`int`, optional): Pass one of :attr:`MY_CHAT_MEMBER`,\n :attr:`CHAT_MEMBER` or :attr:`ANY_CHAT_MEMBER` to specify if this handler should handle\n only updates with :attr:`telegram.Update.my_chat_member`,\n :attr:`telegram.Update.chat_member` or both. 
Defaults to :attr:`MY_CHAT_MEMBER`.\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n\n .. seealso:: :wiki:`Concurrency`\n\n Attributes:\n callback (:term:`coroutine function`): The callback function for this handler.\n chat_member_types (:obj:`int`): Optional. Specifies if this handler should handle\n only updates with :attr:`telegram.Update.my_chat_member`,\n :attr:`telegram.Update.chat_member` or both.\n block (:obj:`bool`): Determines whether the return value of the callback should be\n awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`.\n\n \"\"\"\n\n __slots__ = (\"chat_member_types\",)\n MY_CHAT_MEMBER: Final[int] = -1\n \"\"\":obj:`int`: Used as a constant to handle only :attr:`telegram.Update.my_chat_member`.\"\"\"\n CHAT_MEMBER: Final[int] = 0\n \"\"\":obj:`int`: Used as a constant to handle only :attr:`telegram.Update.chat_member`.\"\"\"\n ANY_CHAT_MEMBER: Final[int] = 1\n \"\"\":obj:`int`: Used as a constant to handle both :attr:`telegram.Update.my_chat_member`\n and :attr:`telegram.Update.chat_member`.\"\"\"\n\n def __init__(\n self,\n callback: HandlerCallback[Update, CCT, RT],\n chat_member_types: int = MY_CHAT_MEMBER,\n block: DVType[bool] = DEFAULT_TRUE,\n ):\n super().__init__(callback, block=block)\n\n self.chat_member_types: Optional[int] = chat_member_types\n\n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update` | :obj:`object`): Incoming update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if isinstance(update, Update):\n if not (update.my_chat_member or update.chat_member):\n return False\n if self.chat_member_types == self.ANY_CHAT_MEMBER:\n return True\n if self.chat_member_types == self.CHAT_MEMBER:\n return bool(update.chat_member)\n return bool(update.my_chat_member)\n return False\n", "path": "telegram/ext/_handlers/chatmemberhandler.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2024\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the ChatMemberHandler class.\"\"\"\nfrom typing import Final, Optional, TypeVar\n\nfrom telegram import Update\nfrom telegram._utils.defaultvalue import DEFAULT_TRUE\nfrom telegram._utils.types import SCT, DVType\nfrom telegram.ext._handlers.basehandler import BaseHandler\nfrom telegram.ext._utils._update_parsing import parse_chat_id\nfrom telegram.ext._utils.types import CCT, HandlerCallback\n\nRT = TypeVar(\"RT\")\n\n\nclass ChatMemberHandler(BaseHandler[Update, CCT]):\n \"\"\"Handler class to handle Telegram updates that contain a chat member update.\n\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n\n Examples:\n :any:`Chat Member Bot <examples.chatmemberbot>`\n\n .. versionadded:: 13.4\n\n Args:\n callback (:term:`coroutine function`): The callback function for this handler. Will be\n called when :meth:`check_update` has determined that an update should be processed by\n this handler. Callback signature::\n\n async def callback(update: Update, context: CallbackContext)\n\n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n chat_member_types (:obj:`int`, optional): Pass one of :attr:`MY_CHAT_MEMBER`,\n :attr:`CHAT_MEMBER` or :attr:`ANY_CHAT_MEMBER` to specify if this handler should handle\n only updates with :attr:`telegram.Update.my_chat_member`,\n :attr:`telegram.Update.chat_member` or both. Defaults to :attr:`MY_CHAT_MEMBER`.\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n\n .. seealso:: :wiki:`Concurrency`\n chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters chat member updates from\n specified chat ID(s) only.\n .. versionadded:: NEXT.VERSION\n\n Attributes:\n callback (:term:`coroutine function`): The callback function for this handler.\n chat_member_types (:obj:`int`): Optional. 
Specifies if this handler should handle\n only updates with :attr:`telegram.Update.my_chat_member`,\n :attr:`telegram.Update.chat_member` or both.\n block (:obj:`bool`): Determines whether the return value of the callback should be\n awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`.\n\n \"\"\"\n\n __slots__ = (\n \"_chat_ids\",\n \"chat_member_types\",\n )\n MY_CHAT_MEMBER: Final[int] = -1\n \"\"\":obj:`int`: Used as a constant to handle only :attr:`telegram.Update.my_chat_member`.\"\"\"\n CHAT_MEMBER: Final[int] = 0\n \"\"\":obj:`int`: Used as a constant to handle only :attr:`telegram.Update.chat_member`.\"\"\"\n ANY_CHAT_MEMBER: Final[int] = 1\n \"\"\":obj:`int`: Used as a constant to handle both :attr:`telegram.Update.my_chat_member`\n and :attr:`telegram.Update.chat_member`.\"\"\"\n\n def __init__(\n self,\n callback: HandlerCallback[Update, CCT, RT],\n chat_member_types: int = MY_CHAT_MEMBER,\n block: DVType[bool] = DEFAULT_TRUE,\n chat_id: Optional[SCT[int]] = None,\n ):\n super().__init__(callback, block=block)\n\n self.chat_member_types: Optional[int] = chat_member_types\n self._chat_ids = parse_chat_id(chat_id)\n\n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update` | :obj:`object`): Incoming update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if not isinstance(update, Update):\n return False\n if not (update.my_chat_member or update.chat_member):\n return False\n if (\n self._chat_ids\n and update.effective_chat\n and update.effective_chat.id not in self._chat_ids\n ):\n return False\n if self.chat_member_types == self.ANY_CHAT_MEMBER:\n return True\n if self.chat_member_types == self.CHAT_MEMBER:\n return bool(update.chat_member)\n return bool(update.my_chat_member)\n", "path": "telegram/ext/_handlers/chatmemberhandler.py"}]}
| 1,788 | 710 |
gh_patches_debug_20806
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-416
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
utf8 decoding error
### Description
Context is not correctly displayed due to utf8 decoding error.
Traceback (most recent call last):
File "/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py", line 109, in __call__
return self.function(*args, **kwargs)
File "/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py", line 200, in _OnlyWhenRunning
return function(*a, **kw)
File "/home/carstein/Tools/pwndbg/pwndbg/commands/context.py", line 86, in context
result.extend(func())
File "/home/carstein/Tools/pwndbg/pwndbg/commands/context.py", line 98, in context_regs
return [pwndbg.ui.banner("registers")] + get_regs()
File "/home/carstein/Tools/pwndbg/pwndbg/ui.py", line 52, in banner
banner = ljust_colored(banner, width, str(config.banner_separator))
File "/home/carstein/Tools/pwndbg/pwndbg/color/__init__.py", line 77, in ljust_colored
return x + ((remaining // len(char) + 1) * char)[:remaining]
UnicodeDecodeError: 'utf8' codec can't decode byte 0xe2 in position 105: unexpected end of data
### Steps to reproduce
1. Load binary into gdb
2. `break main`
3. `run`
### My setup
pwndbg version:
Gdb: GNU gdb (GDB) 8.1
Python: 2.7.13 (default, Nov 24 2017, 17:33:09) [GCC 6.3.0 20170516]
Pwndbg: 1.0.0 build: 869e832
Capstone: 4.0.1024
Unicorn: 1.0.1
--- END ISSUE ---
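To illustrate the underlying Python 2 pitfall with a standalone sketch (not pwndbg's actual fix): slicing a UTF-8 encoded byte string can split a multi-byte character, so the padding arithmetic is safer on decoded text:

```python
# -*- coding: utf-8 -*-


def ljust_text(x, length, pad=u"\u2500"):
    # Work on unicode text so len() and slicing count characters, not UTF-8 bytes.
    if isinstance(x, bytes):
        x = x.decode("utf-8")
    if isinstance(pad, bytes):
        pad = pad.decode("utf-8")
    remaining = length - len(x)
    return x + ((remaining // len(pad) + 1) * pad)[:remaining]


print(ljust_text(u"registers ", 20))
```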
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/color/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import re
9
10 import pwndbg.memoize
11
12 NORMAL = "\x1b[0m"
13 BLACK = "\x1b[30m"
14 RED = "\x1b[31m"
15 GREEN = "\x1b[32m"
16 YELLOW = "\x1b[33m"
17 BLUE = "\x1b[34m"
18 PURPLE = "\x1b[35m"
19 CYAN = "\x1b[36m"
20 LIGHT_GREY = LIGHT_GRAY = "\x1b[37m"
21 FOREGROUND = "\x1b[39m"
22 GREY = GRAY = "\x1b[90m"
23 LIGHT_RED = "\x1b[91m"
24 LIGHT_GREEN = "\x1b[92m"
25 LIGHT_YELLOW = "\x1b[93m"
26 LIGHT_BLUE = "\x1b[94m"
27 LIGHT_PURPLE = "\x1b[95m"
28 LIGHT_CYAN = "\x1b[96m"
29 WHITE = "\x1b[97m"
30 BOLD = "\x1b[1m"
31 UNDERLINE = "\x1b[4m"
32
33 def none(x): return str(x)
34 def normal(x): return colorize(x, NORMAL)
35 def black(x): return colorize(x, BLACK)
36 def red(x): return colorize(x, RED)
37 def green(x): return colorize(x, GREEN)
38 def yellow(x): return colorize(x, YELLOW)
39 def blue(x): return colorize(x, BLUE)
40 def purple(x): return colorize(x, PURPLE)
41 def cyan(x): return colorize(x, CYAN)
42 def light_gray(x): return colorize(x, LIGHT_GRAY)
43 def foreground(x): return colorize(x, FOREGROUND)
44 def gray(x): return colorize(x, GRAY)
45 def light_red(x): return colorize(x, LIGHT_RED)
46 def light_green(x): return colorize(x, LIGHT_GREEN)
47 def light_yellow(x): return colorize(x, LIGHT_YELLOW)
48 def light_blue(x): return colorize(x, LIGHT_BLUE)
49 def light_purple(x): return colorize(x, LIGHT_PURPLE)
50 def light_cyan(x): return colorize(x, LIGHT_CYAN)
51 def white(x): return colorize(x, WHITE)
52 def bold(x): return colorize(x, BOLD)
53 def underline(x): return colorize(x, UNDERLINE)
54 def colorize(x, color): return color + terminateWith(str(x), color) + NORMAL
55
56 @pwndbg.memoize.reset_on_stop
57 def generateColorFunctionInner(old, new):
58 def wrapper(text):
59 return new(old(text))
60 return wrapper
61
62 def generateColorFunction(config):
63 function = lambda x: x
64 for color in str(config).split(','):
65 function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')])
66 return function
67
68 def strip(x):
69 return re.sub('\x1b\\[\d+m', '', x)
70
71 def terminateWith(x, color):
72 return re.sub('\x1b\\[0m', NORMAL + color, x)
73
74 def ljust_colored(x, length, char=' '):
75 remaining = length - len(strip(x))
76 return x + ((remaining // len(char) + 1) * char)[:remaining]
77
78 def rjust_colored(x, length, char=' '):
79 remaining = length - len(strip(x))
80 return ((remaining // len(char) + 1) * char)[:remaining] + x
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/color/__init__.py b/pwndbg/color/__init__.py
--- a/pwndbg/color/__init__.py
+++ b/pwndbg/color/__init__.py
@@ -7,6 +7,8 @@
import re
+import six
+
import pwndbg.memoize
NORMAL = "\x1b[0m"
@@ -72,9 +74,17 @@
return re.sub('\x1b\\[0m', NORMAL + color, x)
def ljust_colored(x, length, char=' '):
+ # TODO: workaround until issue #404
+ if six.PY2:
+ x = x if isinstance(x, six.text_type) else x.decode('utf8')
+ char = char if isinstance(char, six.text_type) else char.decode('utf8')
remaining = length - len(strip(x))
return x + ((remaining // len(char) + 1) * char)[:remaining]
def rjust_colored(x, length, char=' '):
+ # TODO: workaround until issue #404
+ if six.PY2:
+ x = x if isinstance(x, six.text_type) else x.decode('utf8')
+ char = char if isinstance(char, six.text_type) else char.decode('utf8')
remaining = length - len(strip(x))
return ((remaining // len(char) + 1) * char)[:remaining] + x
|
{"golden_diff": "diff --git a/pwndbg/color/__init__.py b/pwndbg/color/__init__.py\n--- a/pwndbg/color/__init__.py\n+++ b/pwndbg/color/__init__.py\n@@ -7,6 +7,8 @@\n \n import re\n \n+import six\n+\n import pwndbg.memoize\n \n NORMAL = \"\\x1b[0m\"\n@@ -72,9 +74,17 @@\n return re.sub('\\x1b\\\\[0m', NORMAL + color, x)\n \n def ljust_colored(x, length, char=' '):\n+ # TODO: workaround until issue #404\n+ if six.PY2:\n+ x = x if isinstance(x, six.text_type) else x.decode('utf8')\n+ char = char if isinstance(char, six.text_type) else char.decode('utf8')\n remaining = length - len(strip(x))\n return x + ((remaining // len(char) + 1) * char)[:remaining]\n \n def rjust_colored(x, length, char=' '):\n+ # TODO: workaround until issue #404\n+ if six.PY2:\n+ x = x if isinstance(x, six.text_type) else x.decode('utf8')\n+ char = char if isinstance(char, six.text_type) else char.decode('utf8')\n remaining = length - len(strip(x))\n return ((remaining // len(char) + 1) * char)[:remaining] + x\n", "issue": "utf8 decoding error\n### Description\r\nContext is not correctly displayed due to utf8 decoding error.\r\n\r\nTraceback (most recent call last):\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py\", line 109, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py\", line 200, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/context.py\", line 86, in context\r\n result.extend(func())\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/context.py\", line 98, in context_regs\r\n return [pwndbg.ui.banner(\"registers\")] + get_regs()\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/ui.py\", line 52, in banner\r\n banner = ljust_colored(banner, width, str(config.banner_separator))\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/color/__init__.py\", line 77, in ljust_colored\r\n return x + ((remaining // len(char) + 1) * char)[:remaining]\r\nUnicodeDecodeError: 'utf8' codec can't decode byte 0xe2 in position 105: unexpected end of data\r\n\r\n\r\n### Steps to reproduce\r\n1. Load binary into gdb\r\n2. `break main`\r\n3. 
`run`\r\n\r\n\r\n### My setup\r\npwndbg version: \r\nGdb: GNU gdb (GDB) 8.1\r\nPython: 2.7.13 (default, Nov 24 2017, 17:33:09) [GCC 6.3.0 20170516]\r\nPwndbg: 1.0.0 build: 869e832\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport pwndbg.memoize\n\nNORMAL = \"\\x1b[0m\"\nBLACK = \"\\x1b[30m\"\nRED = \"\\x1b[31m\"\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nPURPLE = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\nLIGHT_GREY = LIGHT_GRAY = \"\\x1b[37m\"\nFOREGROUND = \"\\x1b[39m\"\nGREY = GRAY = \"\\x1b[90m\"\nLIGHT_RED = \"\\x1b[91m\"\nLIGHT_GREEN = \"\\x1b[92m\"\nLIGHT_YELLOW = \"\\x1b[93m\"\nLIGHT_BLUE = \"\\x1b[94m\"\nLIGHT_PURPLE = \"\\x1b[95m\"\nLIGHT_CYAN = \"\\x1b[96m\"\nWHITE = \"\\x1b[97m\"\nBOLD = \"\\x1b[1m\"\nUNDERLINE = \"\\x1b[4m\"\n\ndef none(x): return str(x)\ndef normal(x): return colorize(x, NORMAL)\ndef black(x): return colorize(x, BLACK)\ndef red(x): return colorize(x, RED)\ndef green(x): return colorize(x, GREEN)\ndef yellow(x): return colorize(x, YELLOW)\ndef blue(x): return colorize(x, BLUE)\ndef purple(x): return colorize(x, PURPLE)\ndef cyan(x): return colorize(x, CYAN)\ndef light_gray(x): return colorize(x, LIGHT_GRAY)\ndef foreground(x): return colorize(x, FOREGROUND)\ndef gray(x): return colorize(x, GRAY)\ndef light_red(x): return colorize(x, LIGHT_RED)\ndef light_green(x): return colorize(x, LIGHT_GREEN)\ndef light_yellow(x): return colorize(x, LIGHT_YELLOW)\ndef light_blue(x): return colorize(x, LIGHT_BLUE)\ndef light_purple(x): return colorize(x, LIGHT_PURPLE)\ndef light_cyan(x): return colorize(x, LIGHT_CYAN)\ndef white(x): return colorize(x, WHITE)\ndef bold(x): return colorize(x, BOLD)\ndef underline(x): return colorize(x, UNDERLINE)\ndef colorize(x, color): return color + terminateWith(str(x), color) + NORMAL\n\[email protected]_on_stop\ndef generateColorFunctionInner(old, new):\n def wrapper(text):\n return new(old(text))\n return wrapper\n\ndef generateColorFunction(config):\n function = lambda x: x\n for color in str(config).split(','):\n function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')])\n return function\n\ndef strip(x):\n return re.sub('\\x1b\\\\[\\d+m', '', x)\n\ndef terminateWith(x, color):\n return re.sub('\\x1b\\\\[0m', NORMAL + color, x)\n\ndef ljust_colored(x, length, char=' '):\n remaining = length - len(strip(x))\n return x + ((remaining // len(char) + 1) * char)[:remaining]\n\ndef rjust_colored(x, length, char=' '):\n remaining = length - len(strip(x))\n return ((remaining // len(char) + 1) * char)[:remaining] + x\n", "path": "pwndbg/color/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport six\n\nimport pwndbg.memoize\n\nNORMAL = \"\\x1b[0m\"\nBLACK = \"\\x1b[30m\"\nRED = \"\\x1b[31m\"\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nPURPLE = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\nLIGHT_GREY = LIGHT_GRAY = \"\\x1b[37m\"\nFOREGROUND = \"\\x1b[39m\"\nGREY = GRAY = \"\\x1b[90m\"\nLIGHT_RED = \"\\x1b[91m\"\nLIGHT_GREEN = \"\\x1b[92m\"\nLIGHT_YELLOW = \"\\x1b[93m\"\nLIGHT_BLUE = \"\\x1b[94m\"\nLIGHT_PURPLE = 
\"\\x1b[95m\"\nLIGHT_CYAN = \"\\x1b[96m\"\nWHITE = \"\\x1b[97m\"\nBOLD = \"\\x1b[1m\"\nUNDERLINE = \"\\x1b[4m\"\n\ndef none(x): return str(x)\ndef normal(x): return colorize(x, NORMAL)\ndef black(x): return colorize(x, BLACK)\ndef red(x): return colorize(x, RED)\ndef green(x): return colorize(x, GREEN)\ndef yellow(x): return colorize(x, YELLOW)\ndef blue(x): return colorize(x, BLUE)\ndef purple(x): return colorize(x, PURPLE)\ndef cyan(x): return colorize(x, CYAN)\ndef light_gray(x): return colorize(x, LIGHT_GRAY)\ndef foreground(x): return colorize(x, FOREGROUND)\ndef gray(x): return colorize(x, GRAY)\ndef light_red(x): return colorize(x, LIGHT_RED)\ndef light_green(x): return colorize(x, LIGHT_GREEN)\ndef light_yellow(x): return colorize(x, LIGHT_YELLOW)\ndef light_blue(x): return colorize(x, LIGHT_BLUE)\ndef light_purple(x): return colorize(x, LIGHT_PURPLE)\ndef light_cyan(x): return colorize(x, LIGHT_CYAN)\ndef white(x): return colorize(x, WHITE)\ndef bold(x): return colorize(x, BOLD)\ndef underline(x): return colorize(x, UNDERLINE)\ndef colorize(x, color): return color + terminateWith(str(x), color) + NORMAL\n\[email protected]_on_stop\ndef generateColorFunctionInner(old, new):\n def wrapper(text):\n return new(old(text))\n return wrapper\n\ndef generateColorFunction(config):\n function = lambda x: x\n for color in str(config).split(','):\n function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')])\n return function\n\ndef strip(x):\n return re.sub('\\x1b\\\\[\\d+m', '', x)\n\ndef terminateWith(x, color):\n return re.sub('\\x1b\\\\[0m', NORMAL + color, x)\n\ndef ljust_colored(x, length, char=' '):\n # TODO: workaround until issue #404\n if six.PY2:\n x = x if isinstance(x, six.text_type) else x.decode('utf8')\n char = char if isinstance(char, six.text_type) else char.decode('utf8')\n remaining = length - len(strip(x))\n return x + ((remaining // len(char) + 1) * char)[:remaining]\n\ndef rjust_colored(x, length, char=' '):\n # TODO: workaround until issue #404\n if six.PY2:\n x = x if isinstance(x, six.text_type) else x.decode('utf8')\n char = char if isinstance(char, six.text_type) else char.decode('utf8')\n remaining = length - len(strip(x))\n return ((remaining // len(char) + 1) * char)[:remaining] + x\n", "path": "pwndbg/color/__init__.py"}]}
| 1,675 | 320 |
gh_patches_debug_17123
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-9732
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Long Running Lambda Fails StepFunction State Machine Execution
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
As of `v3.0.0` and `v3.0.1`, StepFunction StateMachines that have long-running Lambda tasks fail execution. It also looks like the StateMachine then retries by re-invoking the lambda 3 times in the background with a 1 minute gap in between invocations. Unfortunately, the state machine will have already failed execution by this point and these lambda runs fail when they try to update the state.
The lambda is started successfully, but then fails with a timeout after 3 seconds:
```
2023-11-24T22:09:56.758 ERROR --- [ad-35 (eval)] l.s.s.a.c.eval_component : Exception=FailureEventException, Error=Exception, Details={"taskFailedEventDetails": {"error": "Exception", "cause": "{\"errorMessage\":\"2023-11-24T22:09:56Z dbd4767f-32b8-46b7-9ef4-382ee583ad0a Task timed out after 3.00 seconds\"}", "resource": "invoke", "resourceType": "lambda"}} at '(StateTaskServiceLambda| {'comment': None, 'input_path': (InputPath| {'input_path_src': '$'}, 'output_path': (OutputPath| {'output_path': '$'}, 'state_entered_event_type': 'TaskStateEntered', 'state_exited_event_type': 'TaskStateExited', 'result_path': None, 'result_selector': None, 'retry': (RetryDecl| {'retriers': [(RetrierDecl| {'error_equals': (ErrorEqualsDecl| {'error_names': [(CustomErrorName| {'error_name': 'Lambda.ClientExecutionTimeoutException'}, (CustomErrorName| {'error_name': 'Lambda.ServiceException'}, (CustomErrorName| {'error_name': 'Lambda.AWSLambdaException'}, (CustomErrorName| {'error_name': 'Lambda.SdkClientException'}]}, 'interval_seconds': (IntervalSecondsDecl| {'seconds': 2}, 'max_attempts': (MaxAttemptsDecl| {'attempts': 6}, 'backoff_rate': (BackoffRateDecl| {'rate': 2.0}, '_attempts_counter': 0, '_next_interval_seconds': 2}]}, 'catch': None, 'timeout': (TimeoutSeconds| {'timeout_seconds': 99999999, 'is_default': None}, 'heartbeat': None, 'parameters': (Parameters| {'payload_tmpl': (PayloadTmpl| {'payload_bindings': [(PayloadBindingValue| {'field': 'FunctionName', 'value': (PayloadValueStr| {'val': 'arn:aws:lambda:us-east-1:000000000000:function:TestAppStack-lambdaslongrunning51EEA4-b04d9aee'}}, (PayloadBindingPath| {'field': 'Payload', 'path': '$'}]}}, 'name': 'long-running-task', 'state_type': <StateType.Task: 15>, 'continue_with': <localstack.services.stepfunctions.asl.component.state.state_continue_with.ContinueWithEnd object at 0xfffee6793b90>, 'resource': (ServiceResource| {'_region': '', '_account': '', 'resource_arn': 'arn:aws:states:::lambda:invoke', 'partition': 'aws', 'service_name': 'lambda', 'api_name': 'lambda', 'api_action': 'invoke', 'condition': None}}'
```
Even if I specify long timeouts on both the Lambda and the LambdaTask the state machine still fails the task after 3 seconds. This was working in version 2, and if I use the old StepFunctions provider, the StateMachine completes successfully.
### Expected Behavior
The State Machine should finish successfully because the long running lambda finishes before the timeout.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
I've created a repository that demonstrates the bug: https://github.com/noseworthy/localstack-sfn-bugs. I'm using localstack pro, so your terminal must have `LOCALSTACK_AUTH_TOKEN` specified. This should work with non-pro localstack however. You just need to modify the `compose.yaml` file.
1. Start localstack using docker-compose: `docker compose up --force-recreate --build -d`
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
1. Install dependencies: `yarn install`
2. Bootstrap the CDK project: `yarn cdklocal bootstrap`
3. Deploy the CDK project: `yarn cdklocal deploy`
4. Trigger the state machine: `yarn trigger`
Watch as the statemachine tries to execute, but fails saying that the long running lambda timed out after 3.00 seconds.
### Environment
```markdown
- OS: macOS Sonoma 14.1.1 (23B81)
- LocalStack: v3.0.1 Pro Docker Image
```
### Anything else?
Demo Repository: https://github.com/noseworthy/localstack-sfn-bugs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/stepfunctions/asl/utils/boto_client.py`
Content:
```
1 from botocore.client import BaseClient
2 from botocore.config import Config
3
4 from localstack.aws.connect import connect_to
5
6
7 def boto_client_for(region: str, account: str, service: str) -> BaseClient:
8 return connect_to.get_client(
9 aws_access_key_id=account,
10 region_name=region,
11 service_name=service,
12 config=Config(parameter_validation=False),
13 )
14
```
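For reference, `botocore.config.Config` exposes the retry and socket-timeout knobs that the accepted patch below adjusts; a sketch with illustrative values (not the project's constants):

```
from botocore.config import Config

# Illustrative only: no automatic retries, generous socket timeouts so a
# synchronous invocation of a long-running Lambda is not cut short or re-sent.
long_call_config = Config(
    parameter_validation=False,
    retries={"max_attempts": 0, "total_max_attempts": 1},
    connect_timeout=300,
    read_timeout=300,
)
```

Passing such a config to `connect_to.get_client(...)` keeps a single in-flight call open; the one-minute re-invocation gaps reported above are consistent with botocore's default read timeout plus automatic retries.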
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/services/stepfunctions/asl/utils/boto_client.py b/localstack/services/stepfunctions/asl/utils/boto_client.py
--- a/localstack/services/stepfunctions/asl/utils/boto_client.py
+++ b/localstack/services/stepfunctions/asl/utils/boto_client.py
@@ -2,6 +2,7 @@
from botocore.config import Config
from localstack.aws.connect import connect_to
+from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds
def boto_client_for(region: str, account: str, service: str) -> BaseClient:
@@ -9,5 +10,10 @@
aws_access_key_id=account,
region_name=region,
service_name=service,
- config=Config(parameter_validation=False),
+ config=Config(
+ parameter_validation=False,
+ retries={"max_attempts": 0, "total_max_attempts": 1},
+ connect_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,
+ read_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,
+ ),
)
|
{"golden_diff": "diff --git a/localstack/services/stepfunctions/asl/utils/boto_client.py b/localstack/services/stepfunctions/asl/utils/boto_client.py\n--- a/localstack/services/stepfunctions/asl/utils/boto_client.py\n+++ b/localstack/services/stepfunctions/asl/utils/boto_client.py\n@@ -2,6 +2,7 @@\n from botocore.config import Config\n \n from localstack.aws.connect import connect_to\n+from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds\n \n \n def boto_client_for(region: str, account: str, service: str) -> BaseClient:\n@@ -9,5 +10,10 @@\n aws_access_key_id=account,\n region_name=region,\n service_name=service,\n- config=Config(parameter_validation=False),\n+ config=Config(\n+ parameter_validation=False,\n+ retries={\"max_attempts\": 0, \"total_max_attempts\": 1},\n+ connect_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,\n+ read_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,\n+ ),\n )\n", "issue": "bug: Long Running Lambda Fails StepFunction State Machine Execution\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nAs of `v3.0.0` and `v3.0.1`, StepFunction StateMachines that have long-running Lambda tasks fail execution. It also looks like the StateMachine then retries by re-invoking the lambda 3 times in the background with a 1 minute gap in between invocations. Unfortunately, the state machine will have already failed execution by this point and these lambda runs fail when they try to update the state.\r\n\r\nThe lambda is started successfully, but then fails with a timeout after 3 seconds:\r\n\r\n```\r\n2023-11-24T22:09:56.758 ERROR --- [ad-35 (eval)] l.s.s.a.c.eval_component : Exception=FailureEventException, Error=Exception, Details={\"taskFailedEventDetails\": {\"error\": \"Exception\", \"cause\": \"{\\\"errorMessage\\\":\\\"2023-11-24T22:09:56Z dbd4767f-32b8-46b7-9ef4-382ee583ad0a Task timed out after 3.00 seconds\\\"}\", \"resource\": \"invoke\", \"resourceType\": \"lambda\"}} at '(StateTaskServiceLambda| {'comment': None, 'input_path': (InputPath| {'input_path_src': '$'}, 'output_path': (OutputPath| {'output_path': '$'}, 'state_entered_event_type': 'TaskStateEntered', 'state_exited_event_type': 'TaskStateExited', 'result_path': None, 'result_selector': None, 'retry': (RetryDecl| {'retriers': [(RetrierDecl| {'error_equals': (ErrorEqualsDecl| {'error_names': [(CustomErrorName| {'error_name': 'Lambda.ClientExecutionTimeoutException'}, (CustomErrorName| {'error_name': 'Lambda.ServiceException'}, (CustomErrorName| {'error_name': 'Lambda.AWSLambdaException'}, (CustomErrorName| {'error_name': 'Lambda.SdkClientException'}]}, 'interval_seconds': (IntervalSecondsDecl| {'seconds': 2}, 'max_attempts': (MaxAttemptsDecl| {'attempts': 6}, 'backoff_rate': (BackoffRateDecl| {'rate': 2.0}, '_attempts_counter': 0, '_next_interval_seconds': 2}]}, 'catch': None, 'timeout': (TimeoutSeconds| {'timeout_seconds': 99999999, 'is_default': None}, 'heartbeat': None, 'parameters': (Parameters| {'payload_tmpl': (PayloadTmpl| {'payload_bindings': [(PayloadBindingValue| {'field': 'FunctionName', 'value': (PayloadValueStr| {'val': 'arn:aws:lambda:us-east-1:000000000000:function:TestAppStack-lambdaslongrunning51EEA4-b04d9aee'}}, (PayloadBindingPath| {'field': 'Payload', 'path': '$'}]}}, 'name': 'long-running-task', 'state_type': <StateType.Task: 15>, 'continue_with': <localstack.services.stepfunctions.asl.component.state.state_continue_with.ContinueWithEnd object at 0xfffee6793b90>, 
'resource': (ServiceResource| {'_region': '', '_account': '', 'resource_arn': 'arn:aws:states:::lambda:invoke', 'partition': 'aws', 'service_name': 'lambda', 'api_name': 'lambda', 'api_action': 'invoke', 'condition': None}}'\r\n```\r\n\r\nEven if I specify long timeouts on both the Lambda and the LambdaTask the state machine still fails the task after 3 seconds. This was working in version 2, and if I use the old StepFunctions provider, the StateMachine completes successfully.\r\n\r\n### Expected Behavior\r\n\r\nThe State Machine should finish successfully because the long running lambda finishes before the timeout.\r\n\r\n### How are you starting LocalStack?\r\n\r\nWith a docker-compose file\r\n\r\n### Steps To Reproduce\r\n\r\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\nI've created a repository that demonstrates the bug: https://github.com/noseworthy/localstack-sfn-bugs. I'm using localstack pro, so your terminal must have `LOCALSTACK_AUTH_TOKEN` specified. This should work with non-pro localstack however. You just need to modify the `compose.yaml` file.\r\n\r\n1. Start localstack using docker-compose: `docker compose up --force-recreate --build -d`\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n1. Install dependencies: `yarn install`\r\n2. Bootstrap the CDK project: `yarn cdklocal bootstrap`\r\n3. Deploy the CDK project: `yarn cdklocal deploy`\r\n4. Trigger the state machine: `yarn trigger`\r\n\r\nWatch as the statemachine tries to execute, but fails saying that the long running lambda timed out after 3.00 seconds.\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: macOS Sonoma 14.1.1 (23B81)\r\n- LocalStack: v3.0.1 Pro Docker Image\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\nDemo Repository: https://github.com/noseworthy/localstack-sfn-bugs\n", "before_files": [{"content": "from botocore.client import BaseClient\nfrom botocore.config import Config\n\nfrom localstack.aws.connect import connect_to\n\n\ndef boto_client_for(region: str, account: str, service: str) -> BaseClient:\n return connect_to.get_client(\n aws_access_key_id=account,\n region_name=region,\n service_name=service,\n config=Config(parameter_validation=False),\n )\n", "path": "localstack/services/stepfunctions/asl/utils/boto_client.py"}], "after_files": [{"content": "from botocore.client import BaseClient\nfrom botocore.config import Config\n\nfrom localstack.aws.connect import connect_to\nfrom localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds\n\n\ndef boto_client_for(region: str, account: str, service: str) -> BaseClient:\n return connect_to.get_client(\n aws_access_key_id=account,\n region_name=region,\n service_name=service,\n config=Config(\n parameter_validation=False,\n retries={\"max_attempts\": 0, \"total_max_attempts\": 1},\n connect_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,\n read_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,\n ),\n )\n", "path": "localstack/services/stepfunctions/asl/utils/boto_client.py"}]}
| 1,547 | 228 |
gh_patches_debug_22038
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-6836
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/doc/help/state/draft-stream-editorial/ fails with a 404
The page is looking for a state it cannot find.
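The 404 is what the lookup in `ietf/doc/views_help.py` (shown below) produces for an unmapped type: `.get(type, (None, None))` falls back to `(None, None)`, and `get_object_or_404(StateType, slug=None)` cannot match any state type. A sketch of the fall-through:

```
# No "draft-stream-editorial" key exists in the slug/title mapping, so the
# default tuple is returned and the StateType query has nothing to match.
slug, title = {
    "draft-stream-ise": ("draft-stream-ise", "ISE Stream States for Internet-Drafts"),
    "draft-stream-iab": ("draft-stream-iab", "IAB Stream States for Internet-Drafts"),
}.get("draft-stream-editorial", (None, None))

assert (slug, title) == (None, None)   # -> get_object_or_404(...) raises Http404
```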
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/doc/views_help.py`
Content:
```
1 # Copyright The IETF Trust 2013-2023, All Rights Reserved
2
3 from django.shortcuts import render, get_object_or_404
4 from django.http import Http404
5
6 from ietf.doc.models import State, StateType, IESG_SUBSTATE_TAGS
7 from ietf.name.models import DocRelationshipName, DocTagName
8 from ietf.doc.utils import get_tags_for_stream_id
9
10 def state_help(request, type=None):
11 slug, title = {
12 "draft-iesg": ("draft-iesg", "IESG States for Internet-Drafts"),
13 "draft-rfceditor": ("draft-rfceditor", "RFC Editor States for Internet-Drafts"),
14 "draft-iana-action": ("draft-iana-action", "IANA Action States for Internet-Drafts"),
15 "draft-iana-review": ("draft-iana-review", "IANA Review States for Internet-Drafts"),
16 "draft-iana-experts": ("draft-iana-experts", "IANA Expert Review States for Internet-Drafts"),
17 "draft-stream-ietf": ("draft-stream-ietf", "IETF Stream States for Internet-Drafts"),
18 "draft-stream-irtf": ("draft-stream-irtf", "IRTF Stream States for Internet-Drafts"),
19 "draft-stream-ise": ("draft-stream-ise", "ISE Stream States for Internet-Drafts"),
20 "draft-stream-iab": ("draft-stream-iab", "IAB Stream States for Internet-Drafts"),
21 "charter": ("charter", "Charter States"),
22 "conflict-review": ("conflrev", "Conflict Review States"),
23 "status-change": ("statchg", "RFC Status Change States"),
24 "bofreq": ("bofreq", "BOF Request States"),
25 "procmaterials": ("procmaterials", "Proceedings Materials States"),
26 "statement": {"statement", "Statement States"}
27 }.get(type, (None, None))
28 state_type = get_object_or_404(StateType, slug=slug)
29
30 states = State.objects.filter(used=True, type=state_type).order_by("order")
31
32 has_next_states = False
33 for state in states:
34 if state.next_states.all():
35 has_next_states = True
36 break
37
38 tags = []
39
40 if state_type.slug == "draft-iesg":
41 tags = DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS)
42 elif state_type.slug.startswith("draft-stream-"):
43 possible = get_tags_for_stream_id(state_type.slug.replace("draft-stream-", ""))
44 tags = DocTagName.objects.filter(slug__in=possible)
45
46 return render(request, "doc/state_help.html",
47 {
48 "title": title,
49 "state_type": state_type,
50 "states": states,
51 "has_next_states": has_next_states,
52 "tags": tags,
53 } )
54
55 def relationship_help(request,subset=None):
56 subsets = { "reference": ['refnorm','refinfo','refunk','refold'],
57 "status" : ['tops','tois','tohist','toinf','tobcp','toexp'],
58 }
59 if subset and subset not in subsets:
60 raise Http404()
61 rels = DocRelationshipName.objects.filter(used=True)
62 if subset:
63 rels = rels.filter(slug__in=subsets[subset])
64 return render(request, "doc/relationship_help.html", { "relations": rels } )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/doc/views_help.py b/ietf/doc/views_help.py
--- a/ietf/doc/views_help.py
+++ b/ietf/doc/views_help.py
@@ -1,5 +1,7 @@
# Copyright The IETF Trust 2013-2023, All Rights Reserved
+import debug # pyflakes: ignore
+
from django.shortcuts import render, get_object_or_404
from django.http import Http404
@@ -18,6 +20,7 @@
"draft-stream-irtf": ("draft-stream-irtf", "IRTF Stream States for Internet-Drafts"),
"draft-stream-ise": ("draft-stream-ise", "ISE Stream States for Internet-Drafts"),
"draft-stream-iab": ("draft-stream-iab", "IAB Stream States for Internet-Drafts"),
+ "draft-stream-editorial": ("draft-stream-editorial", "Editorial Stream States for Internet-Drafts"),
"charter": ("charter", "Charter States"),
"conflict-review": ("conflrev", "Conflict Review States"),
"status-change": ("statchg", "RFC Status Change States"),
|
{"golden_diff": "diff --git a/ietf/doc/views_help.py b/ietf/doc/views_help.py\n--- a/ietf/doc/views_help.py\n+++ b/ietf/doc/views_help.py\n@@ -1,5 +1,7 @@\n # Copyright The IETF Trust 2013-2023, All Rights Reserved\n \n+import debug # pyflakes: ignore\n+\n from django.shortcuts import render, get_object_or_404\n from django.http import Http404\n \n@@ -18,6 +20,7 @@\n \"draft-stream-irtf\": (\"draft-stream-irtf\", \"IRTF Stream States for Internet-Drafts\"),\n \"draft-stream-ise\": (\"draft-stream-ise\", \"ISE Stream States for Internet-Drafts\"),\n \"draft-stream-iab\": (\"draft-stream-iab\", \"IAB Stream States for Internet-Drafts\"),\n+ \"draft-stream-editorial\": (\"draft-stream-editorial\", \"Editorial Stream States for Internet-Drafts\"),\n \"charter\": (\"charter\", \"Charter States\"),\n \"conflict-review\": (\"conflrev\", \"Conflict Review States\"),\n \"status-change\": (\"statchg\", \"RFC Status Change States\"),\n", "issue": "/doc/help/state/draft-stream-editorial/ fails with a 404\nThe page is looking for a state it cannot find.\n", "before_files": [{"content": "# Copyright The IETF Trust 2013-2023, All Rights Reserved\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import Http404\n\nfrom ietf.doc.models import State, StateType, IESG_SUBSTATE_TAGS\nfrom ietf.name.models import DocRelationshipName, DocTagName\nfrom ietf.doc.utils import get_tags_for_stream_id\n\ndef state_help(request, type=None):\n slug, title = {\n \"draft-iesg\": (\"draft-iesg\", \"IESG States for Internet-Drafts\"),\n \"draft-rfceditor\": (\"draft-rfceditor\", \"RFC Editor States for Internet-Drafts\"),\n \"draft-iana-action\": (\"draft-iana-action\", \"IANA Action States for Internet-Drafts\"),\n \"draft-iana-review\": (\"draft-iana-review\", \"IANA Review States for Internet-Drafts\"),\n \"draft-iana-experts\": (\"draft-iana-experts\", \"IANA Expert Review States for Internet-Drafts\"),\n \"draft-stream-ietf\": (\"draft-stream-ietf\", \"IETF Stream States for Internet-Drafts\"),\n \"draft-stream-irtf\": (\"draft-stream-irtf\", \"IRTF Stream States for Internet-Drafts\"),\n \"draft-stream-ise\": (\"draft-stream-ise\", \"ISE Stream States for Internet-Drafts\"),\n \"draft-stream-iab\": (\"draft-stream-iab\", \"IAB Stream States for Internet-Drafts\"),\n \"charter\": (\"charter\", \"Charter States\"),\n \"conflict-review\": (\"conflrev\", \"Conflict Review States\"),\n \"status-change\": (\"statchg\", \"RFC Status Change States\"),\n \"bofreq\": (\"bofreq\", \"BOF Request States\"),\n \"procmaterials\": (\"procmaterials\", \"Proceedings Materials States\"),\n \"statement\": {\"statement\", \"Statement States\"}\n }.get(type, (None, None))\n state_type = get_object_or_404(StateType, slug=slug)\n\n states = State.objects.filter(used=True, type=state_type).order_by(\"order\")\n\n has_next_states = False\n for state in states:\n if state.next_states.all():\n has_next_states = True\n break\n\n tags = []\n\n if state_type.slug == \"draft-iesg\":\n tags = DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS)\n elif state_type.slug.startswith(\"draft-stream-\"):\n possible = get_tags_for_stream_id(state_type.slug.replace(\"draft-stream-\", \"\"))\n tags = DocTagName.objects.filter(slug__in=possible)\n\n return render(request, \"doc/state_help.html\",\n {\n \"title\": title,\n \"state_type\": state_type,\n \"states\": states,\n \"has_next_states\": has_next_states,\n \"tags\": tags,\n } )\n\ndef relationship_help(request,subset=None):\n subsets = { \"reference\": 
['refnorm','refinfo','refunk','refold'],\n \"status\" : ['tops','tois','tohist','toinf','tobcp','toexp'],\n }\n if subset and subset not in subsets:\n raise Http404()\n rels = DocRelationshipName.objects.filter(used=True)\n if subset:\n rels = rels.filter(slug__in=subsets[subset]) \n return render(request, \"doc/relationship_help.html\", { \"relations\": rels } )\n", "path": "ietf/doc/views_help.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2013-2023, All Rights Reserved\n\nimport debug # pyflakes: ignore\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import Http404\n\nfrom ietf.doc.models import State, StateType, IESG_SUBSTATE_TAGS\nfrom ietf.name.models import DocRelationshipName, DocTagName\nfrom ietf.doc.utils import get_tags_for_stream_id\n\ndef state_help(request, type=None):\n slug, title = {\n \"draft-iesg\": (\"draft-iesg\", \"IESG States for Internet-Drafts\"),\n \"draft-rfceditor\": (\"draft-rfceditor\", \"RFC Editor States for Internet-Drafts\"),\n \"draft-iana-action\": (\"draft-iana-action\", \"IANA Action States for Internet-Drafts\"),\n \"draft-iana-review\": (\"draft-iana-review\", \"IANA Review States for Internet-Drafts\"),\n \"draft-iana-experts\": (\"draft-iana-experts\", \"IANA Expert Review States for Internet-Drafts\"),\n \"draft-stream-ietf\": (\"draft-stream-ietf\", \"IETF Stream States for Internet-Drafts\"),\n \"draft-stream-irtf\": (\"draft-stream-irtf\", \"IRTF Stream States for Internet-Drafts\"),\n \"draft-stream-ise\": (\"draft-stream-ise\", \"ISE Stream States for Internet-Drafts\"),\n \"draft-stream-iab\": (\"draft-stream-iab\", \"IAB Stream States for Internet-Drafts\"),\n \"draft-stream-editorial\": (\"draft-stream-editorial\", \"Editorial Stream States for Internet-Drafts\"),\n \"charter\": (\"charter\", \"Charter States\"),\n \"conflict-review\": (\"conflrev\", \"Conflict Review States\"),\n \"status-change\": (\"statchg\", \"RFC Status Change States\"),\n \"bofreq\": (\"bofreq\", \"BOF Request States\"),\n \"procmaterials\": (\"procmaterials\", \"Proceedings Materials States\"),\n \"statement\": {\"statement\", \"Statement States\"}\n }.get(type, (None, None))\n state_type = get_object_or_404(StateType, slug=slug)\n\n states = State.objects.filter(used=True, type=state_type).order_by(\"order\")\n\n has_next_states = False\n for state in states:\n if state.next_states.all():\n has_next_states = True\n break\n\n tags = []\n\n if state_type.slug == \"draft-iesg\":\n tags = DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS)\n elif state_type.slug.startswith(\"draft-stream-\"):\n possible = get_tags_for_stream_id(state_type.slug.replace(\"draft-stream-\", \"\"))\n tags = DocTagName.objects.filter(slug__in=possible)\n\n return render(request, \"doc/state_help.html\",\n {\n \"title\": title,\n \"state_type\": state_type,\n \"states\": states,\n \"has_next_states\": has_next_states,\n \"tags\": tags,\n } )\n\ndef relationship_help(request,subset=None):\n subsets = { \"reference\": ['refnorm','refinfo','refunk','refold'],\n \"status\" : ['tops','tois','tohist','toinf','tobcp','toexp'],\n }\n if subset and subset not in subsets:\n raise Http404()\n rels = DocRelationshipName.objects.filter(used=True)\n if subset:\n rels = rels.filter(slug__in=subsets[subset]) \n return render(request, \"doc/relationship_help.html\", { \"relations\": rels } )\n", "path": "ietf/doc/views_help.py"}]}
| 1,161 | 257 |
gh_patches_debug_35166
|
rasdani/github-patches
|
git_diff
|
gwastro__pycbc-3561
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need to decode approximant strings in pycbc_condition_strain
Currently pycbc_condition_strain does not recognize approximants after reading them from the variable parameters table of an injection hdf. For example, when approximant 'SEOBNRv4' is stored as a variable parameter in an injection hdf, reading the hdf gives back a string b'SEOBNRv4' which is not recognized as being the same thing as 'SEOBNRv4'.
Running the pycbc live test contained in my pull request https://github.com/gwastro/pycbc/pull/3322 causes an error when pycbc_condition_strain gets to Applying Injections
2020-07-24 12:14:04,373 Applying injections
Traceback (most recent call last):
File "/home/max.trevor/dev_env/bin/pycbc_condition_strain", line 4, in <module>
__import__('pkg_resources').run_script('PyCBC===e9f3da', 'pycbc_condition_strain')
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py", line 667, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1464, in run_script
exec(code, namespace, namespace)
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/EGG-INFO/scripts/pycbc_condition_strain", line 87, in <module>
precision=args.output_precision)
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/strain/strain.py", line 392, in from_cli
inj_filter_rejector=inj_filter_rejector)
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py", line 505, in apply
detector_name, f_lower=f_l, distance_scale=distance_scale)
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py", line 557, in make_strain_from_inj_object
**self.extra_args)
File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/waveform/waveform.py", line 462, in get_td_waveform
(input_params['approximant']))
ValueError: Approximant b'SEOBNRv4' not available
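This is the usual h5py round trip: fixed-width string data comes back as byte strings, so `b'SEOBNRv4'` fails the comparison against the approximant name `'SEOBNRv4'`. A hedged sketch of normalising such a value before it reaches the waveform interface (`row` is a hypothetical record read from the injection table; note the accepted diff below only changes the example injection generator, not this code path):

```
# Illustrative only: decode a byte-string approximant read back from an HDF file.
approximant = row["approximant"]
if isinstance(approximant, bytes):
    approximant = approximant.decode("utf-8")   # b'SEOBNRv4' -> 'SEOBNRv4'
```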
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/live/generate_injections.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 from pycbc.io import FieldArray
6 from pycbc.inject import InjectionSet
7
8
9 if os.path.exists('./test_inj1.hdf'):
10 raise OSError("output-file 1 already exists")
11
12 if os.path.exists('./test_inj2.hdf'):
13 raise OSError("output-file 2 already exists")
14
15 dtype = [('mass1', float), ('mass2', float),
16 ('spin1z', float), ('spin2z', float),
17 ('tc', float), ('distance', float)]
18
19 # injection 1
20 static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4',
21 'taper': 'start', 'ra': 45.0, 'dec': 45.0,
22 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}
23
24 samples = FieldArray(1, dtype=dtype)
25
26 # The following 'magic numbers' are intended to match the highest
27 # mass injection in the template bank
28 samples['mass1'] = [290.929321]
29 samples['mass2'] = [3.6755455]
30 samples['spin1z'] = [0.9934847]
31 samples['spin2z'] = [0.92713535]
32 samples['tc'] = [1272790100.1]
33 samples['distance'] = [301.5]
34
35 InjectionSet.write('test_inj1.hdf', samples, static_args=static_params,
36 injtype='cbc', cmd=" ".join(sys.argv))
37
38 # injection 2
39 static_params['approximant'] = 'SpinTaylorT4'
40
41 samples = FieldArray(1, dtype=dtype)
42
43 # The following 'magic numbers' are intended to match the lowest
44 # mass injection in the template bank
45 samples['mass1'] = [1.1331687]
46 samples['mass2'] = [1.010624]
47 samples['spin1z'] = [0.029544285]
48 samples['spin2z'] = [0.020993788]
49 samples['tc'] = [1272790260.1]
50 samples['distance'] = [36.0]
51
52 InjectionSet.write('test_inj2.hdf', samples, static_args=static_params,
53 injtype='cbc', cmd=" ".join(sys.argv))
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/live/generate_injections.py b/examples/live/generate_injections.py
--- a/examples/live/generate_injections.py
+++ b/examples/live/generate_injections.py
@@ -1,53 +1,30 @@
#!/usr/bin/env python
-import os
import sys
from pycbc.io import FieldArray
from pycbc.inject import InjectionSet
-if os.path.exists('./test_inj1.hdf'):
- raise OSError("output-file 1 already exists")
-
-if os.path.exists('./test_inj2.hdf'):
- raise OSError("output-file 2 already exists")
-
dtype = [('mass1', float), ('mass2', float),
('spin1z', float), ('spin2z', float),
- ('tc', float), ('distance', float)]
+ ('tc', float), ('distance', float),
+ ('approximant', 'S32')]
-# injection 1
-static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4',
+static_params = {'f_lower': 18.0, 'f_ref': 18.0,
'taper': 'start', 'ra': 45.0, 'dec': 45.0,
'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}
-samples = FieldArray(1, dtype=dtype)
+samples = FieldArray(2, dtype=dtype)
# The following 'magic numbers' are intended to match the highest
-# mass injection in the template bank
-samples['mass1'] = [290.929321]
-samples['mass2'] = [3.6755455]
-samples['spin1z'] = [0.9934847]
-samples['spin2z'] = [0.92713535]
-samples['tc'] = [1272790100.1]
-samples['distance'] = [301.5]
-
-InjectionSet.write('test_inj1.hdf', samples, static_args=static_params,
- injtype='cbc', cmd=" ".join(sys.argv))
-
-# injection 2
-static_params['approximant'] = 'SpinTaylorT4'
-
-samples = FieldArray(1, dtype=dtype)
-
-# The following 'magic numbers' are intended to match the lowest
-# mass injection in the template bank
-samples['mass1'] = [1.1331687]
-samples['mass2'] = [1.010624]
-samples['spin1z'] = [0.029544285]
-samples['spin2z'] = [0.020993788]
-samples['tc'] = [1272790260.1]
-samples['distance'] = [36.0]
-
-InjectionSet.write('test_inj2.hdf', samples, static_args=static_params,
+# and lowest mass templates in the template bank
+samples['mass1'] = [290.929321, 1.1331687]
+samples['mass2'] = [3.6755455, 1.010624]
+samples['spin1z'] = [0.9934847, 0.029544285]
+samples['spin2z'] = [0.92713535, 0.020993788]
+samples['tc'] = [1272790100.1, 1272790260.1]
+samples['distance'] = [301.5, 36.0]
+samples['approximant'] = ['SEOBNRv4', 'SpinTaylorT4']
+
+InjectionSet.write('injections.hdf', samples, static_args=static_params,
injtype='cbc', cmd=" ".join(sys.argv))
|
{"golden_diff": "diff --git a/examples/live/generate_injections.py b/examples/live/generate_injections.py\n--- a/examples/live/generate_injections.py\n+++ b/examples/live/generate_injections.py\n@@ -1,53 +1,30 @@\n #!/usr/bin/env python\n \n-import os\n import sys\n from pycbc.io import FieldArray\n from pycbc.inject import InjectionSet\n \n \n-if os.path.exists('./test_inj1.hdf'):\n- raise OSError(\"output-file 1 already exists\")\n-\n-if os.path.exists('./test_inj2.hdf'):\n- raise OSError(\"output-file 2 already exists\")\n-\n dtype = [('mass1', float), ('mass2', float),\n ('spin1z', float), ('spin2z', float),\n- ('tc', float), ('distance', float)]\n+ ('tc', float), ('distance', float),\n+ ('approximant', 'S32')]\n \n-# injection 1\n-static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4',\n+static_params = {'f_lower': 18.0, 'f_ref': 18.0,\n 'taper': 'start', 'ra': 45.0, 'dec': 45.0,\n 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}\n \n-samples = FieldArray(1, dtype=dtype)\n+samples = FieldArray(2, dtype=dtype)\n \n # The following 'magic numbers' are intended to match the highest\n-# mass injection in the template bank\n-samples['mass1'] = [290.929321]\n-samples['mass2'] = [3.6755455]\n-samples['spin1z'] = [0.9934847]\n-samples['spin2z'] = [0.92713535]\n-samples['tc'] = [1272790100.1]\n-samples['distance'] = [301.5]\n-\n-InjectionSet.write('test_inj1.hdf', samples, static_args=static_params,\n- injtype='cbc', cmd=\" \".join(sys.argv))\n-\n-# injection 2\n-static_params['approximant'] = 'SpinTaylorT4'\n-\n-samples = FieldArray(1, dtype=dtype)\n-\n-# The following 'magic numbers' are intended to match the lowest\n-# mass injection in the template bank\n-samples['mass1'] = [1.1331687]\n-samples['mass2'] = [1.010624]\n-samples['spin1z'] = [0.029544285]\n-samples['spin2z'] = [0.020993788]\n-samples['tc'] = [1272790260.1]\n-samples['distance'] = [36.0]\n-\n-InjectionSet.write('test_inj2.hdf', samples, static_args=static_params,\n+# and lowest mass templates in the template bank\n+samples['mass1'] = [290.929321, 1.1331687]\n+samples['mass2'] = [3.6755455, 1.010624]\n+samples['spin1z'] = [0.9934847, 0.029544285]\n+samples['spin2z'] = [0.92713535, 0.020993788]\n+samples['tc'] = [1272790100.1, 1272790260.1]\n+samples['distance'] = [301.5, 36.0]\n+samples['approximant'] = ['SEOBNRv4', 'SpinTaylorT4']\n+\n+InjectionSet.write('injections.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n", "issue": "Need to decode approximant strings in pycbc_condition_strain \nCurrently pycbc_condition_strain does not recognize approximants after reading them from the variable parameters table of an injection hdf. 
For example, when approximant 'SEOBNRv4' is stored as a variable parameter in an injection hdf, reading the hdf gives back a string b'SEOBNRv4' which is not recognized as being the same thing as 'SEOBNRv4'\r\n\r\nRunning the pycbc live test contained in my pull request https://github.com/gwastro/pycbc/pull/3322 causes an error when pycbc_condition_strain gets to Applying Injections\r\n\r\n2020-07-24 12:14:04,373 Applying injections\r\nTraceback (most recent call last):\r\n File \"/home/max.trevor/dev_env/bin/pycbc_condition_strain\", line 4, in <module>\r\n __import__('pkg_resources').run_script('PyCBC===e9f3da', 'pycbc_condition_strain')\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 667, in run_script\r\n self.require(requires)[0].run_script(script_name, ns)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 1464, in run_script\r\n exec(code, namespace, namespace)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/EGG-INFO/scripts/pycbc_condition_strain\", line 87, in <module>\r\n precision=args.output_precision)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/strain/strain.py\", line 392, in from_cli\r\n inj_filter_rejector=inj_filter_rejector)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py\", line 505, in apply\r\n detector_name, f_lower=f_l, distance_scale=distance_scale)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py\", line 557, in make_strain_from_inj_object\r\n **self.extra_args)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/waveform/waveform.py\", line 462, in get_td_waveform\r\n (input_params['approximant']))\r\nValueError: Approximant b'SEOBNRv4' not available\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nfrom pycbc.io import FieldArray\nfrom pycbc.inject import InjectionSet\n\n\nif os.path.exists('./test_inj1.hdf'):\n raise OSError(\"output-file 1 already exists\")\n\nif os.path.exists('./test_inj2.hdf'):\n raise OSError(\"output-file 2 already exists\")\n\ndtype = [('mass1', float), ('mass2', float),\n ('spin1z', float), ('spin2z', float),\n ('tc', float), ('distance', float)]\n\n# injection 1\nstatic_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4',\n 'taper': 'start', 'ra': 45.0, 'dec': 45.0,\n 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}\n\nsamples = FieldArray(1, dtype=dtype)\n\n# The following 'magic numbers' are intended to match the highest\n# mass injection in the template bank\nsamples['mass1'] = [290.929321]\nsamples['mass2'] = [3.6755455]\nsamples['spin1z'] = [0.9934847]\nsamples['spin2z'] = [0.92713535]\nsamples['tc'] = [1272790100.1]\nsamples['distance'] = [301.5]\n\nInjectionSet.write('test_inj1.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n\n# injection 2\nstatic_params['approximant'] = 'SpinTaylorT4'\n\nsamples = FieldArray(1, dtype=dtype)\n\n# The following 'magic numbers' are intended to match the lowest\n# mass injection in the template bank\nsamples['mass1'] = [1.1331687]\nsamples['mass2'] = [1.010624]\nsamples['spin1z'] = [0.029544285]\nsamples['spin2z'] = [0.020993788]\nsamples['tc'] = [1272790260.1]\nsamples['distance'] = 
[36.0]\n\nInjectionSet.write('test_inj2.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n", "path": "examples/live/generate_injections.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport sys\nfrom pycbc.io import FieldArray\nfrom pycbc.inject import InjectionSet\n\n\ndtype = [('mass1', float), ('mass2', float),\n ('spin1z', float), ('spin2z', float),\n ('tc', float), ('distance', float),\n ('approximant', 'S32')]\n\nstatic_params = {'f_lower': 18.0, 'f_ref': 18.0,\n 'taper': 'start', 'ra': 45.0, 'dec': 45.0,\n 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}\n\nsamples = FieldArray(2, dtype=dtype)\n\n# The following 'magic numbers' are intended to match the highest\n# and lowest mass templates in the template bank\nsamples['mass1'] = [290.929321, 1.1331687]\nsamples['mass2'] = [3.6755455, 1.010624]\nsamples['spin1z'] = [0.9934847, 0.029544285]\nsamples['spin2z'] = [0.92713535, 0.020993788]\nsamples['tc'] = [1272790100.1, 1272790260.1]\nsamples['distance'] = [301.5, 36.0]\nsamples['approximant'] = ['SEOBNRv4', 'SpinTaylorT4']\n\nInjectionSet.write('injections.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n", "path": "examples/live/generate_injections.py"}]}
| 1,624 | 942 |
gh_patches_debug_11308
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-2418
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
inspect_response(response) yields incorrect response in IPython shell
Example case (requires registration at example site, and even then would be hard to use as a use-case; modify to suit your needs): http://pastebin.com/GT8N893q
In the above example, the response.meta printout in after_submit callback does not match that within the inspect_response shell on the second iteration (the first is correct). It appears that inspect_response has a stale response the second time.
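The staleness is consistent with `InteractiveShellEmbed.instance()` being a singleton accessor: the first `inspect_response` creates the shell with the first namespace, and every later call returns the same cached object while the new `user_ns` is ignored. A sketch of the behaviour, assuming IPython's `SingletonConfigurable` API:

```
from IPython.terminal.embed import InteractiveShellEmbed

first = InteractiveShellEmbed.instance(user_ns={"response": "resp-1"})
second = InteractiveShellEmbed.instance(user_ns={"response": "resp-2"})
assert first is second                    # cached instance, old namespace kept

InteractiveShellEmbed.clear_instance()    # what the fix below adds before embedding
fresh = InteractiveShellEmbed.instance(user_ns={"response": "resp-2"})
assert fresh is not first                 # fresh instance, fresh namespace
```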
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/console.py`
Content:
```
1 from functools import wraps
2 from collections import OrderedDict
3
4 def _embed_ipython_shell(namespace={}, banner=''):
5 """Start an IPython Shell"""
6 try:
7 from IPython.terminal.embed import InteractiveShellEmbed
8 from IPython.terminal.ipapp import load_default_config
9 except ImportError:
10 from IPython.frontend.terminal.embed import InteractiveShellEmbed
11 from IPython.frontend.terminal.ipapp import load_default_config
12
13 @wraps(_embed_ipython_shell)
14 def wrapper(namespace=namespace, banner=''):
15 config = load_default_config()
16 # Always use .instace() to ensure _instance propagation to all parents
17 # this is needed for <TAB> completion works well for new imports
18 shell = InteractiveShellEmbed.instance(
19 banner1=banner, user_ns=namespace, config=config)
20 shell()
21 return wrapper
22
23 def _embed_bpython_shell(namespace={}, banner=''):
24 """Start a bpython shell"""
25 import bpython
26 @wraps(_embed_bpython_shell)
27 def wrapper(namespace=namespace, banner=''):
28 bpython.embed(locals_=namespace, banner=banner)
29 return wrapper
30
31 def _embed_standard_shell(namespace={}, banner=''):
32 """Start a standard python shell"""
33 import code
34 try: # readline module is only available on unix systems
35 import readline
36 except ImportError:
37 pass
38 else:
39 import rlcompleter
40 readline.parse_and_bind("tab:complete")
41 @wraps(_embed_standard_shell)
42 def wrapper(namespace=namespace, banner=''):
43 code.interact(banner=banner, local=namespace)
44 return wrapper
45
46 DEFAULT_PYTHON_SHELLS = OrderedDict([
47 ('ipython', _embed_ipython_shell),
48 ('bpython', _embed_bpython_shell),
49 ( 'python', _embed_standard_shell),
50 ])
51
52 def get_shell_embed_func(shells=None, known_shells=None):
53 """Return the first acceptable shell-embed function
54 from a given list of shell names.
55 """
56 if shells is None: # list, preference order of shells
57 shells = DEFAULT_PYTHON_SHELLS.keys()
58 if known_shells is None: # available embeddable shells
59 known_shells = DEFAULT_PYTHON_SHELLS.copy()
60 for shell in shells:
61 if shell in known_shells:
62 try:
63 # function test: run all setup code (imports),
64 # but dont fall into the shell
65 return known_shells[shell]()
66 except ImportError:
67 continue
68
69 def start_python_console(namespace=None, banner='', shells=None):
70 """Start Python console bound to the given namespace.
71 Readline support and tab completion will be used on Unix, if available.
72 """
73 if namespace is None:
74 namespace = {}
75
76 try:
77 shell = get_shell_embed_func(shells)
78 if shell is not None:
79 shell(namespace=namespace, banner=banner)
80 except SystemExit: # raised when using exit() in python code.interact
81 pass
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/console.py b/scrapy/utils/console.py
--- a/scrapy/utils/console.py
+++ b/scrapy/utils/console.py
@@ -15,6 +15,9 @@
config = load_default_config()
# Always use .instace() to ensure _instance propagation to all parents
# this is needed for <TAB> completion works well for new imports
+ # and clear the instance to always have the fresh env
+ # on repeated breaks like with inspect_response()
+ InteractiveShellEmbed.clear_instance()
shell = InteractiveShellEmbed.instance(
banner1=banner, user_ns=namespace, config=config)
shell()
|
{"golden_diff": "diff --git a/scrapy/utils/console.py b/scrapy/utils/console.py\n--- a/scrapy/utils/console.py\n+++ b/scrapy/utils/console.py\n@@ -15,6 +15,9 @@\n config = load_default_config()\n # Always use .instace() to ensure _instance propagation to all parents\n # this is needed for <TAB> completion works well for new imports\n+ # and clear the instance to always have the fresh env\n+ # on repeated breaks like with inspect_response()\n+ InteractiveShellEmbed.clear_instance()\n shell = InteractiveShellEmbed.instance(\n banner1=banner, user_ns=namespace, config=config)\n shell()\n", "issue": "inspect_response(response) yields incorrect response in IPython shell\nExample case (requires registration at example site, and even then would be hard to use as a use-case; modify to suit your needs): http://pastebin.com/GT8N893q\n\nIn the above example, the response.meta printout in after_submit callback does not match that within the inspect_response shell on the second iteration (the first is correct). It appears that inspect_response has a stale response the second time.\n\n", "before_files": [{"content": "from functools import wraps\nfrom collections import OrderedDict\n\ndef _embed_ipython_shell(namespace={}, banner=''):\n \"\"\"Start an IPython Shell\"\"\"\n try:\n from IPython.terminal.embed import InteractiveShellEmbed\n from IPython.terminal.ipapp import load_default_config\n except ImportError:\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n from IPython.frontend.terminal.ipapp import load_default_config\n\n @wraps(_embed_ipython_shell)\n def wrapper(namespace=namespace, banner=''):\n config = load_default_config()\n # Always use .instace() to ensure _instance propagation to all parents\n # this is needed for <TAB> completion works well for new imports\n shell = InteractiveShellEmbed.instance(\n banner1=banner, user_ns=namespace, config=config)\n shell()\n return wrapper\n\ndef _embed_bpython_shell(namespace={}, banner=''):\n \"\"\"Start a bpython shell\"\"\"\n import bpython\n @wraps(_embed_bpython_shell)\n def wrapper(namespace=namespace, banner=''):\n bpython.embed(locals_=namespace, banner=banner)\n return wrapper\n\ndef _embed_standard_shell(namespace={}, banner=''):\n \"\"\"Start a standard python shell\"\"\"\n import code\n try: # readline module is only available on unix systems\n import readline\n except ImportError:\n pass\n else:\n import rlcompleter\n readline.parse_and_bind(\"tab:complete\")\n @wraps(_embed_standard_shell)\n def wrapper(namespace=namespace, banner=''):\n code.interact(banner=banner, local=namespace)\n return wrapper\n\nDEFAULT_PYTHON_SHELLS = OrderedDict([\n ('ipython', _embed_ipython_shell),\n ('bpython', _embed_bpython_shell),\n ( 'python', _embed_standard_shell),\n])\n\ndef get_shell_embed_func(shells=None, known_shells=None):\n \"\"\"Return the first acceptable shell-embed function\n from a given list of shell names.\n \"\"\"\n if shells is None: # list, preference order of shells\n shells = DEFAULT_PYTHON_SHELLS.keys()\n if known_shells is None: # available embeddable shells\n known_shells = DEFAULT_PYTHON_SHELLS.copy()\n for shell in shells:\n if shell in known_shells:\n try:\n # function test: run all setup code (imports),\n # but dont fall into the shell\n return known_shells[shell]()\n except ImportError:\n continue\n\ndef start_python_console(namespace=None, banner='', shells=None):\n \"\"\"Start Python console bound to the given namespace.\n Readline support and tab completion will be used on Unix, if available.\n \"\"\"\n 
if namespace is None:\n namespace = {}\n\n try:\n shell = get_shell_embed_func(shells)\n if shell is not None:\n shell(namespace=namespace, banner=banner)\n except SystemExit: # raised when using exit() in python code.interact\n pass\n", "path": "scrapy/utils/console.py"}], "after_files": [{"content": "from functools import wraps\nfrom collections import OrderedDict\n\ndef _embed_ipython_shell(namespace={}, banner=''):\n \"\"\"Start an IPython Shell\"\"\"\n try:\n from IPython.terminal.embed import InteractiveShellEmbed\n from IPython.terminal.ipapp import load_default_config\n except ImportError:\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n from IPython.frontend.terminal.ipapp import load_default_config\n\n @wraps(_embed_ipython_shell)\n def wrapper(namespace=namespace, banner=''):\n config = load_default_config()\n # Always use .instace() to ensure _instance propagation to all parents\n # this is needed for <TAB> completion works well for new imports\n # and clear the instance to always have the fresh env\n # on repeated breaks like with inspect_response()\n InteractiveShellEmbed.clear_instance()\n shell = InteractiveShellEmbed.instance(\n banner1=banner, user_ns=namespace, config=config)\n shell()\n return wrapper\n\ndef _embed_bpython_shell(namespace={}, banner=''):\n \"\"\"Start a bpython shell\"\"\"\n import bpython\n @wraps(_embed_bpython_shell)\n def wrapper(namespace=namespace, banner=''):\n bpython.embed(locals_=namespace, banner=banner)\n return wrapper\n\ndef _embed_standard_shell(namespace={}, banner=''):\n \"\"\"Start a standard python shell\"\"\"\n import code\n try: # readline module is only available on unix systems\n import readline\n except ImportError:\n pass\n else:\n import rlcompleter\n readline.parse_and_bind(\"tab:complete\")\n @wraps(_embed_standard_shell)\n def wrapper(namespace=namespace, banner=''):\n code.interact(banner=banner, local=namespace)\n return wrapper\n\nDEFAULT_PYTHON_SHELLS = OrderedDict([\n ('ipython', _embed_ipython_shell),\n ('bpython', _embed_bpython_shell),\n ( 'python', _embed_standard_shell),\n])\n\ndef get_shell_embed_func(shells=None, known_shells=None):\n \"\"\"Return the first acceptable shell-embed function\n from a given list of shell names.\n \"\"\"\n if shells is None: # list, preference order of shells\n shells = DEFAULT_PYTHON_SHELLS.keys()\n if known_shells is None: # available embeddable shells\n known_shells = DEFAULT_PYTHON_SHELLS.copy()\n for shell in shells:\n if shell in known_shells:\n try:\n # function test: run all setup code (imports),\n # but dont fall into the shell\n return known_shells[shell]()\n except ImportError:\n continue\n\ndef start_python_console(namespace=None, banner='', shells=None):\n \"\"\"Start Python console bound to the given namespace.\n Readline support and tab completion will be used on Unix, if available.\n \"\"\"\n if namespace is None:\n namespace = {}\n\n try:\n shell = get_shell_embed_func(shells)\n if shell is not None:\n shell(namespace=namespace, banner=banner)\n except SystemExit: # raised when using exit() in python code.interact\n pass\n", "path": "scrapy/utils/console.py"}]}
| 1,144 | 142 |
gh_patches_debug_26537
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-1297
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support of python 3.8
- [x] Add to travis
- [x] Add to appveyor
- [x] Add to tox
- [x] Add to setup.py
- [x] Add to docs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """cookiecutter distutils configuration"""
5
6 import os
7 import io
8 import sys
9
10 from setuptools import setup
11
12 version = "1.7.0"
13
14 if sys.argv[-1] == 'publish':
15 os.system('python setup.py sdist upload')
16 os.system('python setup.py bdist_wheel upload')
17 sys.exit()
18
19 if sys.argv[-1] == 'tag':
20 os.system("git tag -a %s -m 'version %s'" % (version, version))
21 os.system("git push --tags")
22 sys.exit()
23
24 with io.open('README.md', 'r', encoding='utf-8') as readme_file:
25 readme = readme_file.read()
26
27 requirements = [
28 'binaryornot>=0.2.0',
29 'jinja2>=2.7',
30 'click>=7.0',
31 'poyo>=0.1.0',
32 'jinja2-time>=0.1.0',
33 'requests>=2.18.0',
34 'six>=1.10',
35 ]
36
37 if sys.argv[-1] == 'readme':
38 print(readme)
39 sys.exit()
40
41
42 setup(
43 name='cookiecutter',
44 version=version,
45 description=('A command-line utility that creates projects from project '
46 'templates, e.g. creating a Python package project from a '
47 'Python package project template.'),
48 long_description=readme,
49 long_description_content_type='text/markdown',
50 author='Audrey Roy',
51 author_email='[email protected]',
52 url='https://github.com/cookiecutter/cookiecutter',
53 packages=[
54 'cookiecutter',
55 ],
56 package_dir={'cookiecutter': 'cookiecutter'},
57 entry_points={
58 'console_scripts': [
59 'cookiecutter = cookiecutter.__main__:main',
60 ]
61 },
62 include_package_data=True,
63 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
64 install_requires=requirements,
65 extras_require={
66 ':python_version<"3.3"': ['whichcraft>=0.4.0'],
67 },
68 license='BSD',
69 zip_safe=False,
70 classifiers=[
71 'Development Status :: 5 - Production/Stable',
72 'Environment :: Console',
73 'Intended Audience :: Developers',
74 'Natural Language :: English',
75 'License :: OSI Approved :: BSD License',
76 'Programming Language :: Python',
77 'Programming Language :: Python :: 2',
78 'Programming Language :: Python :: 2.7',
79 'Programming Language :: Python :: 3',
80 'Programming Language :: Python :: 3.5',
81 'Programming Language :: Python :: 3.6',
82 'Programming Language :: Python :: 3.7',
83 'Programming Language :: Python :: Implementation :: CPython',
84 'Programming Language :: Python :: Implementation :: PyPy',
85 'Topic :: Software Development',
86 ],
87 keywords=(
88 'cookiecutter, Python, projects, project templates, Jinja2, '
89 'skeleton, scaffolding, project directory, setup.py, package, '
90 'packaging'
91 ),
92 )
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,21 +68,22 @@
license='BSD',
zip_safe=False,
classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'Natural Language :: English',
- 'License :: OSI Approved :: BSD License',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.5',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: Implementation :: CPython',
- 'Programming Language :: Python :: Implementation :: PyPy',
- 'Topic :: Software Development',
+ "Development Status :: 5 - Production/Stable",
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "Natural Language :: English",
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: Implementation :: CPython",
+ "Programming Language :: Python :: Implementation :: PyPy",
+ "Topic :: Software Development",
],
keywords=(
'cookiecutter, Python, projects, project templates, Jinja2, '
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,21 +68,22 @@\n license='BSD',\n zip_safe=False,\n classifiers=[\n- 'Development Status :: 5 - Production/Stable',\n- 'Environment :: Console',\n- 'Intended Audience :: Developers',\n- 'Natural Language :: English',\n- 'License :: OSI Approved :: BSD License',\n- 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n- 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.5',\n- 'Programming Language :: Python :: 3.6',\n- 'Programming Language :: Python :: 3.7',\n- 'Programming Language :: Python :: Implementation :: CPython',\n- 'Programming Language :: Python :: Implementation :: PyPy',\n- 'Topic :: Software Development',\n+ \"Development Status :: 5 - Production/Stable\",\n+ \"Environment :: Console\",\n+ \"Intended Audience :: Developers\",\n+ \"Natural Language :: English\",\n+ \"License :: OSI Approved :: BSD License\",\n+ \"Programming Language :: Python\",\n+ \"Programming Language :: Python :: 2\",\n+ \"Programming Language :: Python :: 2.7\",\n+ \"Programming Language :: Python :: 3\",\n+ \"Programming Language :: Python :: 3.5\",\n+ \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: Implementation :: CPython\",\n+ \"Programming Language :: Python :: Implementation :: PyPy\",\n+ \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n", "issue": "Add support of python 3.8\n- [x] Add to travis\r\n- [x] Add to appveyor\r\n- [x] Add to tox\r\n- [x] Add to setup.py\r\n- [x] Add to docs\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=7.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n 'requests>=2.18.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={\n ':python_version<\"3.3\"': ['whichcraft>=0.4.0'],\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=7.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n 'requests>=2.18.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={\n ':python_version<\"3.3\"': ['whichcraft>=0.4.0'],\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]}
| 1,183 | 411 |
gh_patches_debug_9915
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-61
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make the fuse logic separated out of graph
For now, the fuse logic adheres to graph.pyx; we should separate the logic out of graph and make it standalone, so we can run some unit tests on the fuse.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 1999-2017 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import sys
17 from setuptools import setup, find_packages, Extension
18
19 import numpy as np
20 from Cython.Build import cythonize
21 from Cython.Distutils import build_ext
22
23 repo_root = os.path.dirname(os.path.abspath(__file__))
24
25 try:
26 execfile
27 except NameError:
28 def execfile(fname, globs, locs=None):
29 locs = locs or globs
30 exec(compile(open(fname).read(), fname, "exec"), globs, locs)
31
32 version_file_path = os.path.join(repo_root, 'mars', '_version.py')
33 version_ns = {'__file__': version_file_path}
34 execfile(version_file_path, version_ns)
35
36 requirements = []
37 with open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:
38 requirements.extend(f.read().splitlines())
39
40
41 extra_requirements=[]
42 with open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:
43 extra_requirements.extend(f.read().splitlines())
44
45
46 long_description = None
47 if os.path.exists(os.path.join(repo_root, 'README.rst')):
48 with open(os.path.join(repo_root, 'README.rst')) as f:
49 long_description = f.read()
50
51
52 if os.path.exists(os.path.join(repo_root, '.git')):
53 git_info = version_ns['get_git_info']()
54 if git_info:
55 with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:
56 git_file.write('%s %s' % git_info)
57
58
59 if 'CI_MODE' in os.environ:
60 for root, dirs, files in os.walk(repo_root):
61 for fn in files:
62 if not fn.endswith('.pyx'):
63 continue
64 path = os.path.join(root, fn)
65 with open(path, 'rb') as f:
66 src = f.read()
67 with open(path, 'wb') as f:
68 f.write(b'# cython: linetrace=True' + os.linesep.encode('utf-8') + src)
69
70
71 if 'MSC' in sys.version:
72 extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]
73 if 'CI_MODE' in os.environ:
74 extra_compile_args.extend(['/DCYTHON_TRACE_NOGIL=#1', '/DCYTHON_TRACE=#1'])
75 extension_kw = {'extra_compile_args': extra_compile_args}
76 else:
77 extra_compile_args = ['-O3']
78 if 'CI_MODE' in os.environ:
79 extra_compile_args.extend(['-DCYTHON_TRACE_NOGIL=1', '-DCYTHON_TRACE=1'])
80 extension_kw = {'extra_compile_args': extra_compile_args}
81 extension_kw['include_dirs'] = [np.get_include()]
82 extensions = [
83 Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),
84 Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),
85 Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),
86 Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),
87 Extension('mars.actors.distributor', ['mars/actors/distributor.pyx'], **extension_kw),
88 Extension('mars.actors.cluster', ['mars/actors/cluster.pyx'], **extension_kw),
89 Extension('mars.actors.pool.messages', ['mars/actors/pool/messages.pyx'], **extension_kw),
90 Extension('mars.actors.pool.utils', ['mars/actors/pool/utils.pyx'], **extension_kw),
91 Extension('mars.actors.pool.gevent_pool', ['mars/actors/pool/gevent_pool.pyx'], **extension_kw),
92 Extension('mars.serialize.core', ['mars/serialize/core.pyx'], **extension_kw),
93 Extension('mars.serialize.pbserializer', ['mars/serialize/pbserializer.pyx'], **extension_kw),
94 Extension('mars.serialize.jsonserializer', ['mars/serialize/jsonserializer.pyx'], **extension_kw),
95 ]
96
97
98 setup_options = dict(
99 name='pymars',
100 version=version_ns['__version__'],
101 description='MARS: a tensor-based unified framework for large-scale data computation.',
102 long_description=long_description,
103 author='Qin Xuye',
104 author_email='[email protected]',
105 maintainer='Qin Xuye',
106 maintainer_email='[email protected]',
107 url='http://github.com/mars-project/mars',
108 license='Apache License 2.0',
109 classifiers=[
110 'Operating System :: OS Independent',
111 'Programming Language :: Python',
112 'Programming Language :: Python :: 2',
113 'Programming Language :: Python :: 2.7',
114 'Programming Language :: Python :: 3',
115 'Programming Language :: Python :: 3.5',
116 'Programming Language :: Python :: 3.6',
117 'Programming Language :: Python :: 3.7',
118 'Programming Language :: Python :: Implementation :: CPython',
119 'Topic :: Software Development :: Libraries',
120 ],
121 packages=find_packages(exclude=('*.tests.*', '*.tests')),
122 include_package_data=True,
123 scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 'scripts/mars-web'],
124 install_requires=requirements,
125 cmdclass={'build_ext': build_ext},
126 ext_modules=cythonize(extensions),
127 extras_require={'distributed': extra_requirements}
128 )
129 setup(**setup_options)
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -81,6 +81,7 @@
extension_kw['include_dirs'] = [np.get_include()]
extensions = [
Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),
+ Extension('mars.fuse', ['mars/fuse.pyx'], **extension_kw),
Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),
Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),
Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -81,6 +81,7 @@\n extension_kw['include_dirs'] = [np.get_include()]\n extensions = [\n Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),\n+ Extension('mars.fuse', ['mars/fuse.pyx'], **extension_kw),\n Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),\n Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),\n Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),\n", "issue": "make the fuse logic separated out of graph\nFor now, the fuse logic is adhere to the graph.pyx, we should separate the logic out of graph, and make it standalone, so we can do some unit test on the fuse.\n", "before_files": [{"content": "# Copyright 1999-2017 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n execfile\nexcept NameError:\n def execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\nversion_file_path = os.path.join(repo_root, 'mars', '_version.py')\nversion_ns = {'__file__': version_file_path}\nexecfile(version_file_path, version_ns)\n\nrequirements = []\nwith open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:\n requirements.extend(f.read().splitlines())\n\n\nextra_requirements=[]\nwith open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:\n extra_requirements.extend(f.read().splitlines())\n\n\nlong_description = None\nif os.path.exists(os.path.join(repo_root, 'README.rst')):\n with open(os.path.join(repo_root, 'README.rst')) as f:\n long_description = f.read()\n\n\nif os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n if git_info:\n with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:\n git_file.write('%s %s' % git_info)\n\n\nif 'CI_MODE' in os.environ:\n for root, dirs, files in os.walk(repo_root):\n for fn in files:\n if not fn.endswith('.pyx'):\n continue\n path = os.path.join(root, fn)\n with open(path, 'rb') as f:\n src = f.read()\n with open(path, 'wb') as f:\n f.write(b'# cython: linetrace=True' + os.linesep.encode('utf-8') + src)\n\n\nif 'MSC' in sys.version:\n extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]\n if 'CI_MODE' in os.environ:\n extra_compile_args.extend(['/DCYTHON_TRACE_NOGIL=#1', '/DCYTHON_TRACE=#1'])\n extension_kw = {'extra_compile_args': extra_compile_args}\nelse:\n extra_compile_args = ['-O3']\n if 'CI_MODE' in os.environ:\n extra_compile_args.extend(['-DCYTHON_TRACE_NOGIL=1', '-DCYTHON_TRACE=1'])\n extension_kw = {'extra_compile_args': extra_compile_args}\nextension_kw['include_dirs'] = [np.get_include()]\nextensions = [\n Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),\n 
Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),\n Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),\n Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),\n Extension('mars.actors.distributor', ['mars/actors/distributor.pyx'], **extension_kw),\n Extension('mars.actors.cluster', ['mars/actors/cluster.pyx'], **extension_kw),\n Extension('mars.actors.pool.messages', ['mars/actors/pool/messages.pyx'], **extension_kw),\n Extension('mars.actors.pool.utils', ['mars/actors/pool/utils.pyx'], **extension_kw),\n Extension('mars.actors.pool.gevent_pool', ['mars/actors/pool/gevent_pool.pyx'], **extension_kw),\n Extension('mars.serialize.core', ['mars/serialize/core.pyx'], **extension_kw),\n Extension('mars.serialize.pbserializer', ['mars/serialize/pbserializer.pyx'], **extension_kw),\n Extension('mars.serialize.jsonserializer', ['mars/serialize/jsonserializer.pyx'], **extension_kw),\n]\n\n\nsetup_options = dict(\n name='pymars',\n version=version_ns['__version__'],\n description='MARS: a tensor-based unified framework for large-scale data computation.',\n long_description=long_description,\n author='Qin Xuye',\n author_email='[email protected]',\n maintainer='Qin Xuye',\n maintainer_email='[email protected]',\n url='http://github.com/mars-project/mars',\n license='Apache License 2.0',\n classifiers=[\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Libraries',\n ],\n packages=find_packages(exclude=('*.tests.*', '*.tests')),\n include_package_data=True,\n scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 'scripts/mars-web'],\n install_requires=requirements,\n cmdclass={'build_ext': build_ext},\n ext_modules=cythonize(extensions),\n extras_require={'distributed': extra_requirements}\n)\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 1999-2017 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n execfile\nexcept NameError:\n def execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\nversion_file_path = os.path.join(repo_root, 'mars', '_version.py')\nversion_ns = {'__file__': version_file_path}\nexecfile(version_file_path, version_ns)\n\nrequirements = []\nwith open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:\n 
requirements.extend(f.read().splitlines())\n\n\nextra_requirements=[]\nwith open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:\n extra_requirements.extend(f.read().splitlines())\n\n\nlong_description = None\nif os.path.exists(os.path.join(repo_root, 'README.rst')):\n with open(os.path.join(repo_root, 'README.rst')) as f:\n long_description = f.read()\n\n\nif os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n if git_info:\n with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:\n git_file.write('%s %s' % git_info)\n\n\nif 'CI_MODE' in os.environ:\n for root, dirs, files in os.walk(repo_root):\n for fn in files:\n if not fn.endswith('.pyx'):\n continue\n path = os.path.join(root, fn)\n with open(path, 'rb') as f:\n src = f.read()\n with open(path, 'wb') as f:\n f.write(b'# cython: linetrace=True' + os.linesep.encode('utf-8') + src)\n\n\nif 'MSC' in sys.version:\n extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]\n if 'CI_MODE' in os.environ:\n extra_compile_args.extend(['/DCYTHON_TRACE_NOGIL=#1', '/DCYTHON_TRACE=#1'])\n extension_kw = {'extra_compile_args': extra_compile_args}\nelse:\n extra_compile_args = ['-O3']\n if 'CI_MODE' in os.environ:\n extra_compile_args.extend(['-DCYTHON_TRACE_NOGIL=1', '-DCYTHON_TRACE=1'])\n extension_kw = {'extra_compile_args': extra_compile_args}\nextension_kw['include_dirs'] = [np.get_include()]\nextensions = [\n Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),\n Extension('mars.fuse', ['mars/fuse.pyx'], **extension_kw),\n Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),\n Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),\n Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),\n Extension('mars.actors.distributor', ['mars/actors/distributor.pyx'], **extension_kw),\n Extension('mars.actors.cluster', ['mars/actors/cluster.pyx'], **extension_kw),\n Extension('mars.actors.pool.messages', ['mars/actors/pool/messages.pyx'], **extension_kw),\n Extension('mars.actors.pool.utils', ['mars/actors/pool/utils.pyx'], **extension_kw),\n Extension('mars.actors.pool.gevent_pool', ['mars/actors/pool/gevent_pool.pyx'], **extension_kw),\n Extension('mars.serialize.core', ['mars/serialize/core.pyx'], **extension_kw),\n Extension('mars.serialize.pbserializer', ['mars/serialize/pbserializer.pyx'], **extension_kw),\n Extension('mars.serialize.jsonserializer', ['mars/serialize/jsonserializer.pyx'], **extension_kw),\n]\n\n\nsetup_options = dict(\n name='pymars',\n version=version_ns['__version__'],\n description='MARS: a tensor-based unified framework for large-scale data computation.',\n long_description=long_description,\n author='Qin Xuye',\n author_email='[email protected]',\n maintainer='Qin Xuye',\n maintainer_email='[email protected]',\n url='http://github.com/mars-project/mars',\n license='Apache License 2.0',\n classifiers=[\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Libraries',\n ],\n packages=find_packages(exclude=('*.tests.*', '*.tests')),\n include_package_data=True,\n scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 
'scripts/mars-web'],\n install_requires=requirements,\n cmdclass={'build_ext': build_ext},\n ext_modules=cythonize(extensions),\n extras_require={'distributed': extra_requirements}\n)\nsetup(**setup_options)\n", "path": "setup.py"}]}
| 1,864 | 140 |
gh_patches_debug_23369
|
rasdani/github-patches
|
git_diff
|
web2py__web2py-1294
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
empty strings stored as NULL in db
https://groups.google.com/forum/#!topic/web2py/IYzhhd3eKXQ
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gluon/dal.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 | This file is part of the web2py Web Framework
6 | Copyrighted by Massimo Di Pierro <[email protected]>
7 | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
8
9 Takes care of adapting pyDAL to web2py's needs
10 -----------------------------------------------
11 """
12
13 from pydal import DAL as DAL
14 from pydal import Field
15 from pydal.objects import Row, Rows, Table, Query, Set, Expression
16 from pydal import SQLCustomType, geoPoint, geoLine, geoPolygon
17
18
19 def _default_validators(db, field):
20 """
21 Field type validation, using web2py's validators mechanism.
22
23 makes sure the content of a field is in line with the declared
24 fieldtype
25 """
26 from gluon import validators
27 field_type, field_length = field.type, field.length
28 requires = []
29
30 if field_type in (('string', 'text', 'password')):
31 requires.append(validators.IS_LENGTH(field_length))
32 elif field_type == 'json':
33 requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
34 elif field_type == 'double' or field_type == 'float':
35 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
36 elif field_type == 'integer':
37 requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))
38 elif field_type == 'bigint':
39 requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))
40 elif field_type.startswith('decimal'):
41 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
42 elif field_type == 'date':
43 requires.append(validators.IS_DATE())
44 elif field_type == 'time':
45 requires.append(validators.IS_TIME())
46 elif field_type == 'datetime':
47 requires.append(validators.IS_DATETIME())
48 elif db and field_type.startswith('reference') and \
49 field_type.find('.') < 0 and \
50 field_type[10:] in db.tables:
51 referenced = db[field_type[10:]]
52 if hasattr(referenced, '_format') and referenced._format:
53 requires = validators.IS_IN_DB(db, referenced._id,
54 referenced._format)
55 if field.unique:
56 requires._and = validators.IS_NOT_IN_DB(db, field)
57 if field.tablename == field_type[10:]:
58 return validators.IS_EMPTY_OR(requires)
59 return requires
60 elif db and field_type.startswith('list:reference') and \
61 field_type.find('.') < 0 and \
62 field_type[15:] in db.tables:
63 referenced = db[field_type[15:]]
64 if hasattr(referenced, '_format') and referenced._format:
65 requires = validators.IS_IN_DB(db, referenced._id,
66 referenced._format, multiple=True)
67 else:
68 requires = validators.IS_IN_DB(db, referenced._id,
69 multiple=True)
70 if field.unique:
71 requires._and = validators.IS_NOT_IN_DB(db, field)
72 if not field.notnull:
73 requires = validators.IS_EMPTY_OR(requires)
74 return requires
75 # does not get here for reference and list:reference
76 if field.unique:
77 requires.insert(0,validators.IS_NOT_IN_DB(db, field))
78 excluded_fields = ['string','upload','text','password','boolean']
79 if (field.notnull or field.unique) and not field_type in excluded_fields:
80 requires.insert(0,validators.IS_NOT_EMPTY())
81 elif not field.notnull and not field.unique and requires:
82 requires[0] = validators.IS_EMPTY_OR(requires[0])
83 return requires
84
85 from gluon.serializers import custom_json, xml
86 from gluon.utils import web2py_uuid
87 from gluon import sqlhtml
88
89
90 DAL.serializers = {'json': custom_json, 'xml': xml}
91 DAL.validators_method = _default_validators
92 DAL.uuid = lambda x: web2py_uuid()
93 DAL.representers = {
94 'rows_render': sqlhtml.represent,
95 'rows_xml': sqlhtml.SQLTABLE
96 }
97 DAL.Field = Field
98 DAL.Table = Table
99
100 #: add web2py contrib drivers to pyDAL
101 from pydal.drivers import DRIVERS
102 if not DRIVERS.get('pymysql'):
103 try:
104 from .contrib import pymysql
105 DRIVERS['pymysql'] = pymysql
106 except:
107 pass
108 if not DRIVERS.get('pyodbc'):
109 try:
110 from .contrib import pypyodbc as pyodbc
111 DRIVERS['pyodbc'] = pyodbc
112 except:
113 pass
114 if not DRIVERS.get('pg8000'):
115 try:
116 from .contrib import pg8000
117 DRIVERS['pg8000'] = pg8000
118 except:
119 pass
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gluon/dal.py b/gluon/dal.py
--- a/gluon/dal.py
+++ b/gluon/dal.py
@@ -74,12 +74,12 @@
return requires
# does not get here for reference and list:reference
if field.unique:
- requires.insert(0,validators.IS_NOT_IN_DB(db, field))
- excluded_fields = ['string','upload','text','password','boolean']
+ requires.insert(0, validators.IS_NOT_IN_DB(db, field))
+ excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']
if (field.notnull or field.unique) and not field_type in excluded_fields:
- requires.insert(0,validators.IS_NOT_EMPTY())
+ requires.insert(0, validators.IS_NOT_EMPTY())
elif not field.notnull and not field.unique and requires:
- requires[0] = validators.IS_EMPTY_OR(requires[0])
+ requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)
return requires
from gluon.serializers import custom_json, xml
@@ -93,7 +93,7 @@
DAL.representers = {
'rows_render': sqlhtml.represent,
'rows_xml': sqlhtml.SQLTABLE
- }
+}
DAL.Field = Field
DAL.Table = Table
|
{"golden_diff": "diff --git a/gluon/dal.py b/gluon/dal.py\n--- a/gluon/dal.py\n+++ b/gluon/dal.py\n@@ -74,12 +74,12 @@\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n- requires.insert(0,validators.IS_NOT_IN_DB(db, field))\n- excluded_fields = ['string','upload','text','password','boolean']\n+ requires.insert(0, validators.IS_NOT_IN_DB(db, field))\n+ excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']\n if (field.notnull or field.unique) and not field_type in excluded_fields:\n- requires.insert(0,validators.IS_NOT_EMPTY())\n+ requires.insert(0, validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n- requires[0] = validators.IS_EMPTY_OR(requires[0])\n+ requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)\n return requires\n \n from gluon.serializers import custom_json, xml\n@@ -93,7 +93,7 @@\n DAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n- }\n+}\n DAL.Field = Field\n DAL.Table = Table\n", "issue": "empty strings stored as NULL in db\nhttps://groups.google.com/forum/#!topic/web2py/IYzhhd3eKXQ\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nTakes care of adapting pyDAL to web2py's needs\n-----------------------------------------------\n\"\"\"\n\nfrom pydal import DAL as DAL\nfrom pydal import Field\nfrom pydal.objects import Row, Rows, Table, Query, Set, Expression\nfrom pydal import SQLCustomType, geoPoint, geoLine, geoPolygon\n\n\ndef _default_validators(db, field):\n \"\"\"\n Field type validation, using web2py's validators mechanism.\n\n makes sure the content of a field is in line with the declared\n fieldtype\n \"\"\"\n from gluon import validators\n field_type, field_length = field.type, field.length\n requires = []\n\n if field_type in (('string', 'text', 'password')):\n requires.append(validators.IS_LENGTH(field_length))\n elif field_type == 'json':\n requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))\n elif field_type == 'double' or field_type == 'float':\n requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))\n elif field_type == 'integer':\n requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))\n elif field_type == 'bigint':\n requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))\n elif field_type.startswith('decimal'):\n requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))\n elif field_type == 'date':\n requires.append(validators.IS_DATE())\n elif field_type == 'time':\n requires.append(validators.IS_TIME())\n elif field_type == 'datetime':\n requires.append(validators.IS_DATETIME())\n elif db and field_type.startswith('reference') and \\\n field_type.find('.') < 0 and \\\n field_type[10:] in db.tables:\n referenced = db[field_type[10:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if field.tablename == field_type[10:]:\n return validators.IS_EMPTY_OR(requires)\n return requires\n elif db and field_type.startswith('list:reference') and \\\n field_type.find('.') < 0 and \\\n field_type[15:] in db.tables:\n referenced = db[field_type[15:]]\n if hasattr(referenced, '_format') 
and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format, multiple=True)\n else:\n requires = validators.IS_IN_DB(db, referenced._id,\n multiple=True)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if not field.notnull:\n requires = validators.IS_EMPTY_OR(requires)\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n requires.insert(0,validators.IS_NOT_IN_DB(db, field))\n excluded_fields = ['string','upload','text','password','boolean']\n if (field.notnull or field.unique) and not field_type in excluded_fields:\n requires.insert(0,validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n requires[0] = validators.IS_EMPTY_OR(requires[0])\n return requires\n\nfrom gluon.serializers import custom_json, xml\nfrom gluon.utils import web2py_uuid\nfrom gluon import sqlhtml\n\n\nDAL.serializers = {'json': custom_json, 'xml': xml}\nDAL.validators_method = _default_validators\nDAL.uuid = lambda x: web2py_uuid()\nDAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n }\nDAL.Field = Field\nDAL.Table = Table\n\n#: add web2py contrib drivers to pyDAL\nfrom pydal.drivers import DRIVERS\nif not DRIVERS.get('pymysql'):\n try:\n from .contrib import pymysql\n DRIVERS['pymysql'] = pymysql\n except:\n pass\nif not DRIVERS.get('pyodbc'):\n try:\n from .contrib import pypyodbc as pyodbc\n DRIVERS['pyodbc'] = pyodbc\n except:\n pass\nif not DRIVERS.get('pg8000'):\n try:\n from .contrib import pg8000\n DRIVERS['pg8000'] = pg8000\n except:\n pass\n", "path": "gluon/dal.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nTakes care of adapting pyDAL to web2py's needs\n-----------------------------------------------\n\"\"\"\n\nfrom pydal import DAL as DAL\nfrom pydal import Field\nfrom pydal.objects import Row, Rows, Table, Query, Set, Expression\nfrom pydal import SQLCustomType, geoPoint, geoLine, geoPolygon\n\n\ndef _default_validators(db, field):\n \"\"\"\n Field type validation, using web2py's validators mechanism.\n\n makes sure the content of a field is in line with the declared\n fieldtype\n \"\"\"\n from gluon import validators\n field_type, field_length = field.type, field.length\n requires = []\n\n if field_type in (('string', 'text', 'password')):\n requires.append(validators.IS_LENGTH(field_length))\n elif field_type == 'json':\n requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))\n elif field_type == 'double' or field_type == 'float':\n requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))\n elif field_type == 'integer':\n requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))\n elif field_type == 'bigint':\n requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))\n elif field_type.startswith('decimal'):\n requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))\n elif field_type == 'date':\n requires.append(validators.IS_DATE())\n elif field_type == 'time':\n requires.append(validators.IS_TIME())\n elif field_type == 'datetime':\n requires.append(validators.IS_DATETIME())\n elif db and field_type.startswith('reference') and \\\n field_type.find('.') < 0 and \\\n field_type[10:] in db.tables:\n referenced = db[field_type[10:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires 
= validators.IS_IN_DB(db, referenced._id,\n referenced._format)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if field.tablename == field_type[10:]:\n return validators.IS_EMPTY_OR(requires)\n return requires\n elif db and field_type.startswith('list:reference') and \\\n field_type.find('.') < 0 and \\\n field_type[15:] in db.tables:\n referenced = db[field_type[15:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format, multiple=True)\n else:\n requires = validators.IS_IN_DB(db, referenced._id,\n multiple=True)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if not field.notnull:\n requires = validators.IS_EMPTY_OR(requires)\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n requires.insert(0, validators.IS_NOT_IN_DB(db, field))\n excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']\n if (field.notnull or field.unique) and not field_type in excluded_fields:\n requires.insert(0, validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)\n return requires\n\nfrom gluon.serializers import custom_json, xml\nfrom gluon.utils import web2py_uuid\nfrom gluon import sqlhtml\n\n\nDAL.serializers = {'json': custom_json, 'xml': xml}\nDAL.validators_method = _default_validators\nDAL.uuid = lambda x: web2py_uuid()\nDAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n}\nDAL.Field = Field\nDAL.Table = Table\n\n#: add web2py contrib drivers to pyDAL\nfrom pydal.drivers import DRIVERS\nif not DRIVERS.get('pymysql'):\n try:\n from .contrib import pymysql\n DRIVERS['pymysql'] = pymysql\n except:\n pass\nif not DRIVERS.get('pyodbc'):\n try:\n from .contrib import pypyodbc as pyodbc\n DRIVERS['pyodbc'] = pyodbc\n except:\n pass\nif not DRIVERS.get('pg8000'):\n try:\n from .contrib import pg8000\n DRIVERS['pg8000'] = pg8000\n except:\n pass\n", "path": "gluon/dal.py"}]}
| 1,598 | 312 |
gh_patches_debug_47493
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-2249
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: Address GeoPandas op deprecation in docs
While working on #2211 I noticed instances of the `op` parameter still being used.
This `op` parameter was deprecated in pull request #1626 in favour of `predicate`.
Locations where op is still present include:
* [sjoin benchmark](https://github.com/geopandas/geopandas/blob/master/benchmarks/sjoin.py)
* [Spatial Joins notebook](https://github.com/geopandas/geopandas/blob/master/doc/source/gallery/spatial_joins.ipynb)
I can address the notebook instance but I don't know what the benchmark instance of `op` does so wouldn't want to change it without a thumbs up from a maintainer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benchmarks/sjoin.py`
Content:
```
1 import random
2
3 from geopandas import GeoDataFrame, GeoSeries, sjoin
4 from shapely.geometry import Point, LineString, Polygon
5 import numpy as np
6
7
8 class Bench:
9
10 param_names = ['op']
11 params = [('intersects', 'contains', 'within')]
12
13 def setup(self, *args):
14 triangles = GeoSeries(
15 [Polygon([(random.random(), random.random()) for _ in range(3)])
16 for _ in range(1000)])
17
18 points = GeoSeries(
19 [Point(x, y) for x, y in zip(np.random.random(10000),
20 np.random.random(10000))])
21
22 df1 = GeoDataFrame({'val1': np.random.randn(len(triangles)),
23 'geometry': triangles})
24 df2 = GeoDataFrame({'val1': np.random.randn(len(points)),
25 'geometry': points})
26
27 self.df1, self.df2 = df1, df2
28
29 def time_sjoin(self, op):
30 sjoin(self.df1, self.df2, op=op)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benchmarks/sjoin.py b/benchmarks/sjoin.py
--- a/benchmarks/sjoin.py
+++ b/benchmarks/sjoin.py
@@ -26,5 +26,5 @@
self.df1, self.df2 = df1, df2
- def time_sjoin(self, op):
- sjoin(self.df1, self.df2, op=op)
+ def time_sjoin(self, predicate):
+ sjoin(self.df1, self.df2, predicate=predicate)
|
{"golden_diff": "diff --git a/benchmarks/sjoin.py b/benchmarks/sjoin.py\n--- a/benchmarks/sjoin.py\n+++ b/benchmarks/sjoin.py\n@@ -26,5 +26,5 @@\n \n self.df1, self.df2 = df1, df2\n \n- def time_sjoin(self, op):\n- sjoin(self.df1, self.df2, op=op)\n+ def time_sjoin(self, predicate):\n+ sjoin(self.df1, self.df2, predicate=predicate)\n", "issue": "DOC: Address GeoPandas op deprecation in docs\nWhile working on #2211 I noticed instances of the `op` parameter still being used.\r\n\r\nThis `op` parameter was deprecated in pull request #1626 in favour of `predicate`.\r\n\r\nLocations where op is still present includes:\r\n* [sjoin benchmark](https://github.com/geopandas/geopandas/blob/master/benchmarks/sjoin.py)\r\n* [Spatial Joins notebook](https://github.com/geopandas/geopandas/blob/master/doc/source/gallery/spatial_joins.ipynb)\r\n \r\nI can address the notebook instance but I don't know what the benchmark instance of `op` does so wouldn't want to change it without a thumbs up from a maintainer.\n", "before_files": [{"content": "import random\n\nfrom geopandas import GeoDataFrame, GeoSeries, sjoin\nfrom shapely.geometry import Point, LineString, Polygon\nimport numpy as np\n\n\nclass Bench:\n\n param_names = ['op']\n params = [('intersects', 'contains', 'within')]\n\n def setup(self, *args):\n triangles = GeoSeries(\n [Polygon([(random.random(), random.random()) for _ in range(3)])\n for _ in range(1000)])\n\n points = GeoSeries(\n [Point(x, y) for x, y in zip(np.random.random(10000),\n np.random.random(10000))])\n\n df1 = GeoDataFrame({'val1': np.random.randn(len(triangles)),\n 'geometry': triangles})\n df2 = GeoDataFrame({'val1': np.random.randn(len(points)),\n 'geometry': points})\n\n self.df1, self.df2 = df1, df2\n\n def time_sjoin(self, op):\n sjoin(self.df1, self.df2, op=op)\n", "path": "benchmarks/sjoin.py"}], "after_files": [{"content": "import random\n\nfrom geopandas import GeoDataFrame, GeoSeries, sjoin\nfrom shapely.geometry import Point, LineString, Polygon\nimport numpy as np\n\n\nclass Bench:\n\n param_names = ['op']\n params = [('intersects', 'contains', 'within')]\n\n def setup(self, *args):\n triangles = GeoSeries(\n [Polygon([(random.random(), random.random()) for _ in range(3)])\n for _ in range(1000)])\n\n points = GeoSeries(\n [Point(x, y) for x, y in zip(np.random.random(10000),\n np.random.random(10000))])\n\n df1 = GeoDataFrame({'val1': np.random.randn(len(triangles)),\n 'geometry': triangles})\n df2 = GeoDataFrame({'val1': np.random.randn(len(points)),\n 'geometry': points})\n\n self.df1, self.df2 = df1, df2\n\n def time_sjoin(self, predicate):\n sjoin(self.df1, self.df2, predicate=predicate)\n", "path": "benchmarks/sjoin.py"}]}
| 702 | 116 |
gh_patches_debug_30426
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-2658
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Figure tests directory gets wiped after running via the test runner.
Just discovered that when I run:
```bash
python setup.py test --figure-only
```
The resulting figure images are saved in a tmp directory that gets wiped at the end of the process (this is under linux for me)
Using `pytest` adds the folder into my sunpy folder.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/conftest.py`
Content:
```
1 from __future__ import absolute_import, print_function
2 from functools import partial
3
4 import os
5 import tempfile
6 import json
7
8 # Force MPL to use non-gui backends for testing.
9 try:
10 import matplotlib
11 except ImportError:
12 pass
13 else:
14 matplotlib.use('Agg')
15
16 from sunpy.tests.hash import HASH_LIBRARY_NAME
17 from sunpy.tests.helpers import new_hash_library, test_fig_dir
18 from sunpy.extern import six
19
20 import pytest
21
22
23 # Don't actually import pytest_remotedata because that can do things to the
24 # entrypoints code in pytest.
25 if six.PY2:
26 import imp
27 try:
28 imp.find_module('pytest_remotedata')
29 HAVE_REMOTEDATA = True
30 except ImportError:
31 HAVE_REMOTEDATA = False
32 else:
33 import importlib
34 remotedata_spec = importlib.util.find_spec("pytest_remotedata")
35 HAVE_REMOTEDATA = remotedata_spec is not None
36
37
38 def pytest_runtest_setup(item):
39 """
40 pytest hook to skip all tests that have the mark 'online' if the
41 client is online (simply detected by checking whether http://www.google.com
42 can be requested).
43 """
44 if isinstance(item, item.Function):
45 if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:
46 pytest.skip("skipping remotedata tests as pytest-remotedata is not installed")
47
48
49 def pytest_unconfigure(config):
50 if len(new_hash_library) > 0:
51 # Write the new hash library in JSON
52 hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME)
53 with open(hashfile, 'w') as outfile:
54 json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))
55
56 print('All images from image tests can be found in {0}'.format(test_fig_dir))
57 print("The corresponding hash library is {0}".format(hashfile))
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/conftest.py b/sunpy/conftest.py
--- a/sunpy/conftest.py
+++ b/sunpy/conftest.py
@@ -13,8 +13,9 @@
else:
matplotlib.use('Agg')
+import sunpy.tests.helpers
from sunpy.tests.hash import HASH_LIBRARY_NAME
-from sunpy.tests.helpers import new_hash_library, test_fig_dir
+from sunpy.tests.helpers import new_hash_library
from sunpy.extern import six
import pytest
@@ -35,6 +36,15 @@
HAVE_REMOTEDATA = remotedata_spec is not None
+def pytest_addoption(parser):
+ parser.addoption("--figure_dir", action="store", default="./figure_test_images")
+
+
[email protected](scope='session', autouse=True)
+def figure_base_dir(request):
+ sunpy.tests.helpers.figure_base_dir = request.config.getoption("--figure_dir")
+
+
def pytest_runtest_setup(item):
"""
pytest hook to skip all tests that have the mark 'online' if the
@@ -49,9 +59,10 @@
def pytest_unconfigure(config):
if len(new_hash_library) > 0:
# Write the new hash library in JSON
- hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME)
+ figure_base_dir = os.path.abspath(config.getoption("--figure_dir"))
+ hashfile = os.path.join(figure_base_dir, HASH_LIBRARY_NAME)
with open(hashfile, 'w') as outfile:
json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))
- print('All images from image tests can be found in {0}'.format(test_fig_dir))
+ print('All images from image tests can be found in {0}'.format(figure_base_dir))
print("The corresponding hash library is {0}".format(hashfile))
|
{"golden_diff": "diff --git a/sunpy/conftest.py b/sunpy/conftest.py\n--- a/sunpy/conftest.py\n+++ b/sunpy/conftest.py\n@@ -13,8 +13,9 @@\n else:\n matplotlib.use('Agg')\n \n+import sunpy.tests.helpers\n from sunpy.tests.hash import HASH_LIBRARY_NAME\n-from sunpy.tests.helpers import new_hash_library, test_fig_dir\n+from sunpy.tests.helpers import new_hash_library\n from sunpy.extern import six\n \n import pytest\n@@ -35,6 +36,15 @@\n HAVE_REMOTEDATA = remotedata_spec is not None\n \n \n+def pytest_addoption(parser):\n+ parser.addoption(\"--figure_dir\", action=\"store\", default=\"./figure_test_images\")\n+\n+\[email protected](scope='session', autouse=True)\n+def figure_base_dir(request):\n+ sunpy.tests.helpers.figure_base_dir = request.config.getoption(\"--figure_dir\")\n+\n+\n def pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n@@ -49,9 +59,10 @@\n def pytest_unconfigure(config):\n if len(new_hash_library) > 0:\n # Write the new hash library in JSON\n- hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME)\n+ figure_base_dir = os.path.abspath(config.getoption(\"--figure_dir\"))\n+ hashfile = os.path.join(figure_base_dir, HASH_LIBRARY_NAME)\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n \n- print('All images from image tests can be found in {0}'.format(test_fig_dir))\n+ print('All images from image tests can be found in {0}'.format(figure_base_dir))\n print(\"The corresponding hash library is {0}\".format(hashfile))\n", "issue": "Figure tests directory gets wiped after running via the test runner.\nJust discovered that when I run:\r\n```bash\r\npython setup.py test --figure-only\r\n```\r\nThe resulting figure images are saved in a tmp directory that gets wiped at the end of the process (this is under linux for me)\r\n\r\nUsing `pytest` adds the folder into my sunpy folder.\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\nfrom functools import partial\n\nimport os\nimport tempfile\nimport json\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\nfrom sunpy.tests.hash import HASH_LIBRARY_NAME\nfrom sunpy.tests.helpers import new_hash_library, test_fig_dir\nfrom sunpy.extern import six\n\nimport pytest\n\n\n# Don't actually import pytest_remotedata because that can do things to the\n# entrypoints code in pytest.\nif six.PY2:\n import imp\n try:\n imp.find_module('pytest_remotedata')\n HAVE_REMOTEDATA = True\n except ImportError:\n HAVE_REMOTEDATA = False\nelse:\n import importlib\n remotedata_spec = importlib.util.find_spec(\"pytest_remotedata\")\n HAVE_REMOTEDATA = remotedata_spec is not None\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n client is online (simply detected by checking whether http://www.google.com\n can be requested).\n \"\"\"\n if isinstance(item, item.Function):\n if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:\n pytest.skip(\"skipping remotedata tests as pytest-remotedata is not installed\")\n\n\ndef pytest_unconfigure(config):\n if len(new_hash_library) > 0:\n # Write the new hash library in JSON\n hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME)\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n print('All images from image tests 
can be found in {0}'.format(test_fig_dir))\n print(\"The corresponding hash library is {0}\".format(hashfile))\n", "path": "sunpy/conftest.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function\nfrom functools import partial\n\nimport os\nimport tempfile\nimport json\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\nimport sunpy.tests.helpers\nfrom sunpy.tests.hash import HASH_LIBRARY_NAME\nfrom sunpy.tests.helpers import new_hash_library\nfrom sunpy.extern import six\n\nimport pytest\n\n\n# Don't actually import pytest_remotedata because that can do things to the\n# entrypoints code in pytest.\nif six.PY2:\n import imp\n try:\n imp.find_module('pytest_remotedata')\n HAVE_REMOTEDATA = True\n except ImportError:\n HAVE_REMOTEDATA = False\nelse:\n import importlib\n remotedata_spec = importlib.util.find_spec(\"pytest_remotedata\")\n HAVE_REMOTEDATA = remotedata_spec is not None\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--figure_dir\", action=\"store\", default=\"./figure_test_images\")\n\n\[email protected](scope='session', autouse=True)\ndef figure_base_dir(request):\n sunpy.tests.helpers.figure_base_dir = request.config.getoption(\"--figure_dir\")\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n client is online (simply detected by checking whether http://www.google.com\n can be requested).\n \"\"\"\n if isinstance(item, item.Function):\n if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:\n pytest.skip(\"skipping remotedata tests as pytest-remotedata is not installed\")\n\n\ndef pytest_unconfigure(config):\n if len(new_hash_library) > 0:\n # Write the new hash library in JSON\n figure_base_dir = os.path.abspath(config.getoption(\"--figure_dir\"))\n hashfile = os.path.join(figure_base_dir, HASH_LIBRARY_NAME)\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n print('All images from image tests can be found in {0}'.format(figure_base_dir))\n print(\"The corresponding hash library is {0}\".format(hashfile))\n", "path": "sunpy/conftest.py"}]}
| 863 | 419 |
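The sunpy patch above follows a standard pytest pattern: register a command-line option in `conftest.py` and expose it through a session-scoped fixture. A stripped-down sketch of that pattern (the default path mirrors the one in the patch; the fixture name and directory creation are illustrative additions, not sunpy's exact code):

```python
# conftest.py -- illustrative sketch of the --figure_dir pattern
import os
import pytest


def pytest_addoption(parser):
    parser.addoption("--figure_dir", action="store",
                     default="./figure_test_images",
                     help="where figure-comparison images are written")


@pytest.fixture(scope="session", autouse=True)
def figure_dir(request):
    # Resolve once per session so the images survive after the run.
    path = os.path.abspath(request.config.getoption("--figure_dir"))
    os.makedirs(path, exist_ok=True)
    return path
```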
gh_patches_debug_6715
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-15256
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError: Module import failed for IMAP Folder
Migrating from latest `version-13` to `develop`:
```
> bench --site my-site migrate
Migrating my-site
Executing frappe.patches.v14_0.copy_mail_data #08.03.21 in my-site (_f0159c4c37bf09c0)
Traceback (most recent call last):
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/utils.py", line 202, in load_doctype_module
doctype_python_modules[key] = frappe.get_module(module_name)
File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 987, in get_module
return importlib.import_module(modulename)
File "/home/frappe/frappe-bench/env/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 941, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'frappe.core.doctype.imap_folder'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 104, in <module>
main()
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/commands/__init__.py", line 26, in _func
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/commands/site.py", line 460, in migrate
skip_search_index=skip_search_index
File "/home/frappe/frappe-bench/apps/frappe/frappe/migrate.py", line 68, in migrate
frappe.modules.patch_handler.run_all(skip_failing)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 36, in run_all
run_patch(patch)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 25, in run_patch
if not run_single(patchmodule = patch):
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 66, in run_single
return execute_patch(patchmodule, method, methodargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 86, in execute_patch
frappe.get_attr(patchmodule.split()[0] + ".execute")()
File "/home/frappe/frappe-bench/apps/frappe/frappe/patches/v14_0/copy_mail_data.py", line 19, in execute
"uidnext": doc.uidnext,
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py", line 183, in append
value = self._init_child(value, key)
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py", line 222, in _init_child
value = get_controller(value["doctype"])(value)
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py", line 70, in get_controller
site_controllers[doctype] = _get_controller()
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py", line 52, in _get_controller
module = load_doctype_module(doctype, module_name)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/utils.py", line 204, in load_doctype_module
raise ImportError('Module import failed for {0} ({1})'.format(doctype, module_name + ' Error: ' + str(e)))
ImportError: Module import failed for IMAP Folder (frappe.core.doctype.imap_folder.imap_folder Error: No module named 'frappe.core.doctype.imap_folder')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/patches/v14_0/copy_mail_data.py`
Content:
```
1 from __future__ import unicode_literals
2 import frappe
3
4
5 def execute():
6 frappe.reload_doc("email", "doctype", "email_account")
7 # patch for all Email Account with the flag use_imap
8 for email_account in frappe.get_list("Email Account", filters={"enable_incoming": 1, "use_imap": 1}):
9 # get all data from Email Account
10 doc = frappe.get_doc("Email Account", email_account.name)
11
12 imap_list = [folder.folder_name for folder in doc.imap_folder]
13 # and append the old data to the child table
14 if doc.uidvalidity or doc.uidnext and "INBOX" not in imap_list:
15 doc.append("imap_folder", {
16 "folder_name": "INBOX",
17 "append_to": doc.append_to,
18 "uid_validity": doc.uidvalidity,
19 "uidnext": doc.uidnext,
20 })
21
22 doc.save()
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/patches/v14_0/copy_mail_data.py b/frappe/patches/v14_0/copy_mail_data.py
--- a/frappe/patches/v14_0/copy_mail_data.py
+++ b/frappe/patches/v14_0/copy_mail_data.py
@@ -3,7 +3,9 @@
def execute():
+ frappe.reload_doc("email", "doctype", "imap_folder")
frappe.reload_doc("email", "doctype", "email_account")
+
# patch for all Email Account with the flag use_imap
for email_account in frappe.get_list("Email Account", filters={"enable_incoming": 1, "use_imap": 1}):
# get all data from Email Account
|
{"golden_diff": "diff --git a/frappe/patches/v14_0/copy_mail_data.py b/frappe/patches/v14_0/copy_mail_data.py\n--- a/frappe/patches/v14_0/copy_mail_data.py\n+++ b/frappe/patches/v14_0/copy_mail_data.py\n@@ -3,7 +3,9 @@\n \n \n def execute():\n+\tfrappe.reload_doc(\"email\", \"doctype\", \"imap_folder\")\n \tfrappe.reload_doc(\"email\", \"doctype\", \"email_account\")\n+\n \t# patch for all Email Account with the flag use_imap\n \tfor email_account in frappe.get_list(\"Email Account\", filters={\"enable_incoming\": 1, \"use_imap\": 1}):\n \t\t# get all data from Email Account\n", "issue": "ImportError: Module import failed for IMAP Folder\nMigrating from latest `version-13` to `develop`:\r\n\r\n```\r\n> bench --site my-site migrate\r\nMigrating my-site\r\nExecuting frappe.patches.v14_0.copy_mail_data #08.03.21 in my-site (_f0159c4c37bf09c0)\r\n\r\nTraceback (most recent call last):\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/modules/utils.py\", line 202, in load_doctype_module\r\n doctype_python_modules[key] = frappe.get_module(module_name)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py\", line 987, in get_module\r\n return importlib.import_module(modulename)\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 941, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 953, in _find_and_load_unlocked\r\nModuleNotFoundError: No module named 'frappe.core.doctype.imap_folder'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/utils/bench_helper.py\", line 104, in <module>\r\n main()\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/utils/bench_helper.py\", line 18, in main\r\n click.Group(commands=commands)(prog_name='bench')\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File 
\"/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/decorators.py\", line 21, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/commands/__init__.py\", line 26, in _func\r\n ret = f(frappe._dict(ctx.obj), *args, **kwargs)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/commands/site.py\", line 460, in migrate\r\n skip_search_index=skip_search_index\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/migrate.py\", line 68, in migrate\r\n frappe.modules.patch_handler.run_all(skip_failing)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py\", line 36, in run_all\r\n run_patch(patch)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py\", line 25, in run_patch\r\n if not run_single(patchmodule = patch):\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py\", line 66, in run_single\r\n return execute_patch(patchmodule, method, methodargs)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py\", line 86, in execute_patch\r\n frappe.get_attr(patchmodule.split()[0] + \".execute\")()\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/patches/v14_0/copy_mail_data.py\", line 19, in execute\r\n \"uidnext\": doc.uidnext,\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py\", line 183, in append\r\n value = self._init_child(value, key)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py\", line 222, in _init_child\r\n value = get_controller(value[\"doctype\"])(value)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py\", line 70, in get_controller\r\n site_controllers[doctype] = _get_controller()\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py\", line 52, in _get_controller\r\n module = load_doctype_module(doctype, module_name)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/modules/utils.py\", line 204, in load_doctype_module\r\n raise ImportError('Module import failed for {0} ({1})'.format(doctype, module_name + ' Error: ' + str(e)))\r\nImportError: Module import failed for IMAP Folder (frappe.core.doctype.imap_folder.imap_folder Error: No module named 'frappe.core.doctype.imap_folder')\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport frappe\n\n\ndef execute():\n\tfrappe.reload_doc(\"email\", \"doctype\", \"email_account\")\n\t# patch for all Email Account with the flag use_imap\n\tfor email_account in frappe.get_list(\"Email Account\", filters={\"enable_incoming\": 1, \"use_imap\": 1}):\n\t\t# get all data from Email Account\n\t\tdoc = frappe.get_doc(\"Email Account\", email_account.name)\n\n\t\timap_list = [folder.folder_name for folder in doc.imap_folder]\n\t\t# and append the old data to the child table\n\t\tif doc.uidvalidity or doc.uidnext and \"INBOX\" not in imap_list:\n\t\t\tdoc.append(\"imap_folder\", {\n\t\t\t\t\"folder_name\": \"INBOX\",\n\t\t\t\t\"append_to\": doc.append_to,\n\t\t\t\t\"uid_validity\": doc.uidvalidity,\n\t\t\t\t\"uidnext\": doc.uidnext,\n\t\t\t})\n\n\t\t\tdoc.save()\n", "path": "frappe/patches/v14_0/copy_mail_data.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nimport frappe\n\n\ndef execute():\n\tfrappe.reload_doc(\"email\", \"doctype\", \"imap_folder\")\n\tfrappe.reload_doc(\"email\", \"doctype\", \"email_account\")\n\n\t# patch for all Email Account with the flag 
use_imap\n\tfor email_account in frappe.get_list(\"Email Account\", filters={\"enable_incoming\": 1, \"use_imap\": 1}):\n\t\t# get all data from Email Account\n\t\tdoc = frappe.get_doc(\"Email Account\", email_account.name)\n\n\t\timap_list = [folder.folder_name for folder in doc.imap_folder]\n\t\t# and append the old data to the child table\n\t\tif doc.uidvalidity or doc.uidnext and \"INBOX\" not in imap_list:\n\t\t\tdoc.append(\"imap_folder\", {\n\t\t\t\t\"folder_name\": \"INBOX\",\n\t\t\t\t\"append_to\": doc.append_to,\n\t\t\t\t\"uid_validity\": doc.uidvalidity,\n\t\t\t\t\"uidnext\": doc.uidnext,\n\t\t\t})\n\n\t\t\tdoc.save()\n", "path": "frappe/patches/v14_0/copy_mail_data.py"}]}
| 1,980 | 171 |
gh_patches_debug_27894
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-1864
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"pwn template" emits a Python2 shebang
We should probably update this to use Python3 explicitly, since that's what we recommend.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/commandline/template.py`
Content:
```
1 #!/usr/bin/env python2
2 from __future__ import absolute_import
3 from __future__ import division
4
5 import re
6
7 from pwn import *
8 from pwnlib.commandline import common
9
10 from mako.lookup import TemplateLookup
11
12 parser = common.parser_commands.add_parser(
13 'template',
14 help = 'Generate an exploit template',
15 description = 'Generate an exploit template'
16 )
17
18 parser.add_argument('exe', nargs='?', help='Target binary')
19 parser.add_argument('--host', help='Remote host / SSH server')
20 parser.add_argument('--port', help='Remote port / SSH port', type=int)
21 parser.add_argument('--user', help='SSH Username')
22 parser.add_argument('--pass', '--password', help='SSH Password', dest='password')
23 parser.add_argument('--path', help='Remote path of file on SSH server')
24 parser.add_argument('--quiet', help='Less verbose template comments', action='store_true')
25 parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')
26
27 def main(args):
28 cache = None
29
30 if cache:
31 cache = os.path.join(context.cache_dir, 'mako')
32
33 lookup = TemplateLookup(
34 directories = [os.path.join(pwnlib.data.path, 'templates')],
35 module_directory = cache
36 )
37
38 # For the SSH scenario, check that the binary is at the
39 # same path on the remote host.
40 if args.user:
41 if not (args.path or args.exe):
42 log.error("Must specify --path or a exe")
43
44 s = ssh(args.user, args.host, args.port or 22, args.password or None)
45
46 try:
47 remote = args.path or args.exe
48 s.download(remote)
49 except Exception:
50 log.warning("Could not download file %r, opening a shell", remote)
51 s.interactive()
52 return
53
54 if not args.exe:
55 args.exe = os.path.basename(args.path)
56
57 template = lookup.get_template('pwnup.mako')
58 output = template.render(args.exe,
59 args.host,
60 args.port,
61 args.user,
62 args.password,
63 args.path,
64 args.quiet)
65
66 # Fix Mako formatting bs
67 output = re.sub('\n\n\n', '\n\n', output)
68
69 # Colorize the output if it's a TTY
70 if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()):
71 from pygments import highlight
72 from pygments.formatters import TerminalFormatter
73 from pygments.lexers.python import PythonLexer
74 output = highlight(output, PythonLexer(), TerminalFormatter())
75
76 print(output)
77
78 # If redirected to a file, make the resulting script executable
79 if not sys.stdout.isatty():
80 try: os.fchmod(sys.stdout.fileno(), 0o700)
81 except OSError: pass
82
83 if __name__ == '__main__':
84 pwnlib.commandline.common.main(__file__)
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/commandline/template.py b/pwnlib/commandline/template.py
old mode 100644
new mode 100755
--- a/pwnlib/commandline/template.py
+++ b/pwnlib/commandline/template.py
@@ -2,8 +2,6 @@
from __future__ import absolute_import
from __future__ import division
-import re
-
from pwn import *
from pwnlib.commandline import common
@@ -25,14 +23,9 @@
parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')
def main(args):
- cache = None
-
- if cache:
- cache = os.path.join(context.cache_dir, 'mako')
-
lookup = TemplateLookup(
directories = [os.path.join(pwnlib.data.path, 'templates')],
- module_directory = cache
+ module_directory = None
)
# For the SSH scenario, check that the binary is at the
@@ -44,10 +37,10 @@
s = ssh(args.user, args.host, args.port or 22, args.password or None)
try:
- remote = args.path or args.exe
- s.download(remote)
+ remote_file = args.path or args.exe
+ s.download(remote_file)
except Exception:
- log.warning("Could not download file %r, opening a shell", remote)
+ log.warning("Could not download file %r, opening a shell", remote_file)
s.interactive()
return
|
{"golden_diff": "diff --git a/pwnlib/commandline/template.py b/pwnlib/commandline/template.py\nold mode 100644\nnew mode 100755\n--- a/pwnlib/commandline/template.py\n+++ b/pwnlib/commandline/template.py\n@@ -2,8 +2,6 @@\n from __future__ import absolute_import\n from __future__ import division\n \n-import re\n-\n from pwn import *\n from pwnlib.commandline import common\n \n@@ -25,14 +23,9 @@\n parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n \n def main(args):\n- cache = None\n-\n- if cache:\n- cache = os.path.join(context.cache_dir, 'mako')\n-\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n- module_directory = cache\n+ module_directory = None\n )\n \n # For the SSH scenario, check that the binary is at the\n@@ -44,10 +37,10 @@\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n \n try:\n- remote = args.path or args.exe\n- s.download(remote)\n+ remote_file = args.path or args.exe\n+ s.download(remote_file)\n except Exception:\n- log.warning(\"Could not download file %r, opening a shell\", remote)\n+ log.warning(\"Could not download file %r, opening a shell\", remote_file)\n s.interactive()\n return\n", "issue": "\"pwn template\" emits a Python2 shebang\nWe should probably update this to use Python3 explicitly, since that's what we recommend.\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport re\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nfrom mako.lookup import TemplateLookup\n\nparser = common.parser_commands.add_parser(\n 'template',\n help = 'Generate an exploit template',\n description = 'Generate an exploit template'\n)\n\nparser.add_argument('exe', nargs='?', help='Target binary')\nparser.add_argument('--host', help='Remote host / SSH server')\nparser.add_argument('--port', help='Remote port / SSH port', type=int)\nparser.add_argument('--user', help='SSH Username')\nparser.add_argument('--pass', '--password', help='SSH Password', dest='password')\nparser.add_argument('--path', help='Remote path of file on SSH server')\nparser.add_argument('--quiet', help='Less verbose template comments', action='store_true')\nparser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n\ndef main(args):\n cache = None\n\n if cache:\n cache = os.path.join(context.cache_dir, 'mako')\n\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n module_directory = cache\n )\n\n # For the SSH scenario, check that the binary is at the\n # same path on the remote host.\n if args.user:\n if not (args.path or args.exe):\n log.error(\"Must specify --path or a exe\")\n\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n\n try:\n remote = args.path or args.exe\n s.download(remote)\n except Exception:\n log.warning(\"Could not download file %r, opening a shell\", remote)\n s.interactive()\n return\n\n if not args.exe:\n args.exe = os.path.basename(args.path)\n\n template = lookup.get_template('pwnup.mako')\n output = template.render(args.exe,\n args.host,\n args.port,\n args.user,\n args.password,\n args.path,\n args.quiet)\n\n # Fix Mako formatting bs\n output = re.sub('\\n\\n\\n', '\\n\\n', output)\n\n # Colorize the output if it's a TTY\n if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()):\n from pygments import highlight\n from 
pygments.formatters import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n output = highlight(output, PythonLexer(), TerminalFormatter())\n\n print(output)\n\n # If redirected to a file, make the resulting script executable\n if not sys.stdout.isatty():\n try: os.fchmod(sys.stdout.fileno(), 0o700)\n except OSError: pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/template.py"}], "after_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nfrom mako.lookup import TemplateLookup\n\nparser = common.parser_commands.add_parser(\n 'template',\n help = 'Generate an exploit template',\n description = 'Generate an exploit template'\n)\n\nparser.add_argument('exe', nargs='?', help='Target binary')\nparser.add_argument('--host', help='Remote host / SSH server')\nparser.add_argument('--port', help='Remote port / SSH port', type=int)\nparser.add_argument('--user', help='SSH Username')\nparser.add_argument('--pass', '--password', help='SSH Password', dest='password')\nparser.add_argument('--path', help='Remote path of file on SSH server')\nparser.add_argument('--quiet', help='Less verbose template comments', action='store_true')\nparser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n\ndef main(args):\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n module_directory = None\n )\n\n # For the SSH scenario, check that the binary is at the\n # same path on the remote host.\n if args.user:\n if not (args.path or args.exe):\n log.error(\"Must specify --path or a exe\")\n\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n\n try:\n remote_file = args.path or args.exe\n s.download(remote_file)\n except Exception:\n log.warning(\"Could not download file %r, opening a shell\", remote_file)\n s.interactive()\n return\n\n if not args.exe:\n args.exe = os.path.basename(args.path)\n\n template = lookup.get_template('pwnup.mako')\n output = template.render(args.exe,\n args.host,\n args.port,\n args.user,\n args.password,\n args.path,\n args.quiet)\n\n # Fix Mako formatting bs\n output = re.sub('\\n\\n\\n', '\\n\\n', output)\n\n # Colorize the output if it's a TTY\n if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()):\n from pygments import highlight\n from pygments.formatters import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n output = highlight(output, PythonLexer(), TerminalFormatter())\n\n print(output)\n\n # If redirected to a file, make the resulting script executable\n if not sys.stdout.isatty():\n try: os.fchmod(sys.stdout.fileno(), 0o700)\n except OSError: pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/template.py"}]}
| 1,092 | 355 |
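One detail worth noting in the pwntools diff: with `from pwn import *`, the name `remote` is pwnlib's tube constructor, so the old local variable presumably shadowed it, hence the rename to `remote_file`. A hypothetical fragment (the `download_target` helper is invented for illustration) showing the safer naming:

```python
from pwn import *  # brings `remote`, `ssh`, `log`, ... into scope


def download_target(s, args):
    # Use `remote_file` rather than `remote`, so pwnlib's remote() stays usable.
    remote_file = args.path or args.exe
    s.download(remote_file)
    return remote_file
```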
gh_patches_debug_20961
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-16094
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add current NetBox version in the `PluginConfig.validate()` error output
### NetBox version
v4.0.1
### Feature type
New functionality
### Proposed functionality
When [`PluginConfig.validate()`](https://github.com/netbox-community/netbox/blob/4a64a3f6e0d0edf27996422eb2dbe0e197a6bea5/netbox/netbox/plugins/__init__.py#L133) determines that the current NetBox version does not meet the plugin requirements, also print out the current NetBox version in the exception message.
### Use case
Currently the error message only prints the version that the plugin mandates, but due to possible installation directory confusion it would be good to also print the current NetBox version. Example case: https://github.com/netbox-community/netbox/issues/16088
### Database changes
None
### External dependencies
None
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/plugins/__init__.py`
Content:
```
1 import collections
2 from importlib import import_module
3
4 from django.apps import AppConfig
5 from django.core.exceptions import ImproperlyConfigured
6 from django.utils.module_loading import import_string
7 from packaging import version
8
9 from netbox.registry import registry
10 from netbox.search import register_search
11 from netbox.utils import register_data_backend
12 from .navigation import *
13 from .registration import *
14 from .templates import *
15 from .utils import *
16
17 # Initialize plugin registry
18 registry['plugins'].update({
19 'graphql_schemas': [],
20 'menus': [],
21 'menu_items': {},
22 'preferences': {},
23 'template_extensions': collections.defaultdict(list),
24 })
25
26 DEFAULT_RESOURCE_PATHS = {
27 'search_indexes': 'search.indexes',
28 'data_backends': 'data_backends.backends',
29 'graphql_schema': 'graphql.schema',
30 'menu': 'navigation.menu',
31 'menu_items': 'navigation.menu_items',
32 'template_extensions': 'template_content.template_extensions',
33 'user_preferences': 'preferences.preferences',
34 }
35
36
37 #
38 # Plugin AppConfig class
39 #
40
41 class PluginConfig(AppConfig):
42 """
43 Subclass of Django's built-in AppConfig class, to be used for NetBox plugins.
44 """
45 # Plugin metadata
46 author = ''
47 author_email = ''
48 description = ''
49 version = ''
50
51 # Root URL path under /plugins. If not set, the plugin's label will be used.
52 base_url = None
53
54 # Minimum/maximum compatible versions of NetBox
55 min_version = None
56 max_version = None
57
58 # Default configuration parameters
59 default_settings = {}
60
61 # Mandatory configuration parameters
62 required_settings = []
63
64 # Middleware classes provided by the plugin
65 middleware = []
66
67 # Django-rq queues dedicated to the plugin
68 queues = []
69
70 # Django apps to append to INSTALLED_APPS when plugin requires them.
71 django_apps = []
72
73 # Optional plugin resources
74 search_indexes = None
75 data_backends = None
76 graphql_schema = None
77 menu = None
78 menu_items = None
79 template_extensions = None
80 user_preferences = None
81
82 def _load_resource(self, name):
83 # Import from the configured path, if defined.
84 if path := getattr(self, name, None):
85 return import_string(f"{self.__module__}.{path}")
86
87 # Fall back to the resource's default path. Return None if the module has not been provided.
88 default_path = f'{self.__module__}.{DEFAULT_RESOURCE_PATHS[name]}'
89 default_module, resource_name = default_path.rsplit('.', 1)
90 try:
91 module = import_module(default_module)
92 return getattr(module, resource_name, None)
93 except ModuleNotFoundError:
94 pass
95
96 def ready(self):
97 from netbox.models.features import register_models
98
99 # Register models
100 register_models(*self.get_models())
101
102 plugin_name = self.name.rsplit('.', 1)[-1]
103
104 # Register search extensions (if defined)
105 search_indexes = self._load_resource('search_indexes') or []
106 for idx in search_indexes:
107 register_search(idx)
108
109 # Register data backends (if defined)
110 data_backends = self._load_resource('data_backends') or []
111 for backend in data_backends:
112 register_data_backend()(backend)
113
114 # Register template content (if defined)
115 if template_extensions := self._load_resource('template_extensions'):
116 register_template_extensions(template_extensions)
117
118 # Register navigation menu and/or menu items (if defined)
119 if menu := self._load_resource('menu'):
120 register_menu(menu)
121 if menu_items := self._load_resource('menu_items'):
122 register_menu_items(self.verbose_name, menu_items)
123
124 # Register GraphQL schema (if defined)
125 if graphql_schema := self._load_resource('graphql_schema'):
126 register_graphql_schema(graphql_schema)
127
128 # Register user preferences (if defined)
129 if user_preferences := self._load_resource('user_preferences'):
130 register_user_preferences(plugin_name, user_preferences)
131
132 @classmethod
133 def validate(cls, user_config, netbox_version):
134
135 # Enforce version constraints
136 current_version = version.parse(netbox_version)
137 if cls.min_version is not None:
138 min_version = version.parse(cls.min_version)
139 if current_version < min_version:
140 raise ImproperlyConfigured(
141 f"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}."
142 )
143 if cls.max_version is not None:
144 max_version = version.parse(cls.max_version)
145 if current_version > max_version:
146 raise ImproperlyConfigured(
147 f"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}."
148 )
149
150 # Verify required configuration settings
151 for setting in cls.required_settings:
152 if setting not in user_config:
153 raise ImproperlyConfigured(
154 f"Plugin {cls.__module__} requires '{setting}' to be present in the PLUGINS_CONFIG section of "
155 f"configuration.py."
156 )
157
158 # Apply default configuration values
159 for setting, value in cls.default_settings.items():
160 if setting not in user_config:
161 user_config[setting] = value
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/plugins/__init__.py b/netbox/netbox/plugins/__init__.py
--- a/netbox/netbox/plugins/__init__.py
+++ b/netbox/netbox/plugins/__init__.py
@@ -138,13 +138,15 @@
min_version = version.parse(cls.min_version)
if current_version < min_version:
raise ImproperlyConfigured(
- f"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}."
+ f"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version} (current: "
+ f"{netbox_version})."
)
if cls.max_version is not None:
max_version = version.parse(cls.max_version)
if current_version > max_version:
raise ImproperlyConfigured(
- f"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}."
+ f"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version} (current: "
+ f"{netbox_version})."
)
# Verify required configuration settings
|
{"golden_diff": "diff --git a/netbox/netbox/plugins/__init__.py b/netbox/netbox/plugins/__init__.py\n--- a/netbox/netbox/plugins/__init__.py\n+++ b/netbox/netbox/plugins/__init__.py\n@@ -138,13 +138,15 @@\n min_version = version.parse(cls.min_version)\n if current_version < min_version:\n raise ImproperlyConfigured(\n- f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}.\"\n+ f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version} (current: \"\n+ f\"{netbox_version}).\"\n )\n if cls.max_version is not None:\n max_version = version.parse(cls.max_version)\n if current_version > max_version:\n raise ImproperlyConfigured(\n- f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}.\"\n+ f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version} (current: \"\n+ f\"{netbox_version}).\"\n )\n \n # Verify required configuration settings\n", "issue": "Add current NetBox version in the `PluginConfig.validate()` error output\n### NetBox version\n\nv4.0.1\n\n### Feature type\n\nNew functionality\n\n### Proposed functionality\n\nWhen [`PluginConfig.validate()`](https://github.com/netbox-community/netbox/blob/4a64a3f6e0d0edf27996422eb2dbe0e197a6bea5/netbox/netbox/plugins/__init__.py#L133) determines that the current NetBox version does not meet the plugin requirements, also print out the current NetBox version in the exception message.\n\n### Use case\n\nCurrently the error message only prints the version that the plugin mandates, but due to possible installation directory confusion it would be good to also print the current NetBox version. Example case: https://github.com/netbox-community/netbox/issues/16088\r\n\n\n### Database changes\n\nNone\n\n### External dependencies\n\nNone\n", "before_files": [{"content": "import collections\nfrom importlib import import_module\n\nfrom django.apps import AppConfig\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.module_loading import import_string\nfrom packaging import version\n\nfrom netbox.registry import registry\nfrom netbox.search import register_search\nfrom netbox.utils import register_data_backend\nfrom .navigation import *\nfrom .registration import *\nfrom .templates import *\nfrom .utils import *\n\n# Initialize plugin registry\nregistry['plugins'].update({\n 'graphql_schemas': [],\n 'menus': [],\n 'menu_items': {},\n 'preferences': {},\n 'template_extensions': collections.defaultdict(list),\n})\n\nDEFAULT_RESOURCE_PATHS = {\n 'search_indexes': 'search.indexes',\n 'data_backends': 'data_backends.backends',\n 'graphql_schema': 'graphql.schema',\n 'menu': 'navigation.menu',\n 'menu_items': 'navigation.menu_items',\n 'template_extensions': 'template_content.template_extensions',\n 'user_preferences': 'preferences.preferences',\n}\n\n\n#\n# Plugin AppConfig class\n#\n\nclass PluginConfig(AppConfig):\n \"\"\"\n Subclass of Django's built-in AppConfig class, to be used for NetBox plugins.\n \"\"\"\n # Plugin metadata\n author = ''\n author_email = ''\n description = ''\n version = ''\n\n # Root URL path under /plugins. 
If not set, the plugin's label will be used.\n base_url = None\n\n # Minimum/maximum compatible versions of NetBox\n min_version = None\n max_version = None\n\n # Default configuration parameters\n default_settings = {}\n\n # Mandatory configuration parameters\n required_settings = []\n\n # Middleware classes provided by the plugin\n middleware = []\n\n # Django-rq queues dedicated to the plugin\n queues = []\n\n # Django apps to append to INSTALLED_APPS when plugin requires them.\n django_apps = []\n\n # Optional plugin resources\n search_indexes = None\n data_backends = None\n graphql_schema = None\n menu = None\n menu_items = None\n template_extensions = None\n user_preferences = None\n\n def _load_resource(self, name):\n # Import from the configured path, if defined.\n if path := getattr(self, name, None):\n return import_string(f\"{self.__module__}.{path}\")\n\n # Fall back to the resource's default path. Return None if the module has not been provided.\n default_path = f'{self.__module__}.{DEFAULT_RESOURCE_PATHS[name]}'\n default_module, resource_name = default_path.rsplit('.', 1)\n try:\n module = import_module(default_module)\n return getattr(module, resource_name, None)\n except ModuleNotFoundError:\n pass\n\n def ready(self):\n from netbox.models.features import register_models\n\n # Register models\n register_models(*self.get_models())\n\n plugin_name = self.name.rsplit('.', 1)[-1]\n\n # Register search extensions (if defined)\n search_indexes = self._load_resource('search_indexes') or []\n for idx in search_indexes:\n register_search(idx)\n\n # Register data backends (if defined)\n data_backends = self._load_resource('data_backends') or []\n for backend in data_backends:\n register_data_backend()(backend)\n\n # Register template content (if defined)\n if template_extensions := self._load_resource('template_extensions'):\n register_template_extensions(template_extensions)\n\n # Register navigation menu and/or menu items (if defined)\n if menu := self._load_resource('menu'):\n register_menu(menu)\n if menu_items := self._load_resource('menu_items'):\n register_menu_items(self.verbose_name, menu_items)\n\n # Register GraphQL schema (if defined)\n if graphql_schema := self._load_resource('graphql_schema'):\n register_graphql_schema(graphql_schema)\n\n # Register user preferences (if defined)\n if user_preferences := self._load_resource('user_preferences'):\n register_user_preferences(plugin_name, user_preferences)\n\n @classmethod\n def validate(cls, user_config, netbox_version):\n\n # Enforce version constraints\n current_version = version.parse(netbox_version)\n if cls.min_version is not None:\n min_version = version.parse(cls.min_version)\n if current_version < min_version:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}.\"\n )\n if cls.max_version is not None:\n max_version = version.parse(cls.max_version)\n if current_version > max_version:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}.\"\n )\n\n # Verify required configuration settings\n for setting in cls.required_settings:\n if setting not in user_config:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires '{setting}' to be present in the PLUGINS_CONFIG section of \"\n f\"configuration.py.\"\n )\n\n # Apply default configuration values\n for setting, value in cls.default_settings.items():\n if setting not in user_config:\n user_config[setting] = value\n", "path": 
"netbox/netbox/plugins/__init__.py"}], "after_files": [{"content": "import collections\nfrom importlib import import_module\n\nfrom django.apps import AppConfig\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.module_loading import import_string\nfrom packaging import version\n\nfrom netbox.registry import registry\nfrom netbox.search import register_search\nfrom netbox.utils import register_data_backend\nfrom .navigation import *\nfrom .registration import *\nfrom .templates import *\nfrom .utils import *\n\n# Initialize plugin registry\nregistry['plugins'].update({\n 'graphql_schemas': [],\n 'menus': [],\n 'menu_items': {},\n 'preferences': {},\n 'template_extensions': collections.defaultdict(list),\n})\n\nDEFAULT_RESOURCE_PATHS = {\n 'search_indexes': 'search.indexes',\n 'data_backends': 'data_backends.backends',\n 'graphql_schema': 'graphql.schema',\n 'menu': 'navigation.menu',\n 'menu_items': 'navigation.menu_items',\n 'template_extensions': 'template_content.template_extensions',\n 'user_preferences': 'preferences.preferences',\n}\n\n\n#\n# Plugin AppConfig class\n#\n\nclass PluginConfig(AppConfig):\n \"\"\"\n Subclass of Django's built-in AppConfig class, to be used for NetBox plugins.\n \"\"\"\n # Plugin metadata\n author = ''\n author_email = ''\n description = ''\n version = ''\n\n # Root URL path under /plugins. If not set, the plugin's label will be used.\n base_url = None\n\n # Minimum/maximum compatible versions of NetBox\n min_version = None\n max_version = None\n\n # Default configuration parameters\n default_settings = {}\n\n # Mandatory configuration parameters\n required_settings = []\n\n # Middleware classes provided by the plugin\n middleware = []\n\n # Django-rq queues dedicated to the plugin\n queues = []\n\n # Django apps to append to INSTALLED_APPS when plugin requires them.\n django_apps = []\n\n # Optional plugin resources\n search_indexes = None\n data_backends = None\n graphql_schema = None\n menu = None\n menu_items = None\n template_extensions = None\n user_preferences = None\n\n def _load_resource(self, name):\n # Import from the configured path, if defined.\n if path := getattr(self, name, None):\n return import_string(f\"{self.__module__}.{path}\")\n\n # Fall back to the resource's default path. 
Return None if the module has not been provided.\n default_path = f'{self.__module__}.{DEFAULT_RESOURCE_PATHS[name]}'\n default_module, resource_name = default_path.rsplit('.', 1)\n try:\n module = import_module(default_module)\n return getattr(module, resource_name, None)\n except ModuleNotFoundError:\n pass\n\n def ready(self):\n from netbox.models.features import register_models\n\n # Register models\n register_models(*self.get_models())\n\n plugin_name = self.name.rsplit('.', 1)[-1]\n\n # Register search extensions (if defined)\n search_indexes = self._load_resource('search_indexes') or []\n for idx in search_indexes:\n register_search(idx)\n\n # Register data backends (if defined)\n data_backends = self._load_resource('data_backends') or []\n for backend in data_backends:\n register_data_backend()(backend)\n\n # Register template content (if defined)\n if template_extensions := self._load_resource('template_extensions'):\n register_template_extensions(template_extensions)\n\n # Register navigation menu and/or menu items (if defined)\n if menu := self._load_resource('menu'):\n register_menu(menu)\n if menu_items := self._load_resource('menu_items'):\n register_menu_items(self.verbose_name, menu_items)\n\n # Register GraphQL schema (if defined)\n if graphql_schema := self._load_resource('graphql_schema'):\n register_graphql_schema(graphql_schema)\n\n # Register user preferences (if defined)\n if user_preferences := self._load_resource('user_preferences'):\n register_user_preferences(plugin_name, user_preferences)\n\n @classmethod\n def validate(cls, user_config, netbox_version):\n\n # Enforce version constraints\n current_version = version.parse(netbox_version)\n if cls.min_version is not None:\n min_version = version.parse(cls.min_version)\n if current_version < min_version:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version} (current: \"\n f\"{netbox_version}).\"\n )\n if cls.max_version is not None:\n max_version = version.parse(cls.max_version)\n if current_version > max_version:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version} (current: \"\n f\"{netbox_version}).\"\n )\n\n # Verify required configuration settings\n for setting in cls.required_settings:\n if setting not in user_config:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires '{setting}' to be present in the PLUGINS_CONFIG section of \"\n f\"configuration.py.\"\n )\n\n # Apply default configuration values\n for setting, value in cls.default_settings.items():\n if setting not in user_config:\n user_config[setting] = value\n", "path": "netbox/netbox/plugins/__init__.py"}]}
| 1,944 | 249 |
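The netbox change is purely a message improvement, but the underlying comparison pattern is reusable; a small self-contained sketch using the same `packaging.version` API (the function name, exception type, and version strings are made up for illustration):

```python
from packaging import version


def check_min_version(current: str, minimum: str, plugin: str) -> None:
    # Raise with both the required and the currently running version.
    if version.parse(current) < version.parse(minimum):
        raise RuntimeError(
            f"Plugin {plugin} requires NetBox minimum version {minimum} "
            f"(current: {current})."
        )


check_min_version("4.0.1", "3.7.0", "example_plugin")   # passes silently
# check_min_version("4.0.1", "4.1.0", "example_plugin") # would raise
```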
gh_patches_debug_15484
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-3239
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Legacy and Warehouse RSS feeds differ
@andrew asked in https://github.com/librariesio/libraries.io/issues/2024#issuecomment-372638824 about Warehouse's RSS feeds:
> * https://pypi.org/rss/updates.xml
> * https://pypi.org/rss/packages.xml
> Which I expected the contents to match the old ones but currently don't:
> * https://pypi.python.org/pypi?%3Aaction=rss
> * https://pypi.python.org/pypi?%3Aaction=packages_rss
I've verified through visual inspection that the data in the legacy RSS feed and the data in the Warehouse RSS feeds differ.
This is a bug in the feeds or a bug in the docs. Currently our [feeds documentation](https://warehouse.readthedocs.io/api-reference/feeds/) and [Warehouse migration guide](https://warehouse.readthedocs.io/api-reference/integration-guide/) don't say anything about deliberate differences between the legacy and Warehouse RSS feeds. We can update the docs if there's a deliberate reason for the difference.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/rss/views.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.view import view_config
14 from sqlalchemy.orm import joinedload
15
16 from warehouse.cache.origin import origin_cache
17 from warehouse.packaging.models import Project, Release
18 from warehouse.xml import XML_CSP
19
20
21 @view_config(
22 route_name="rss.updates",
23 renderer="rss/updates.xml",
24 decorator=[
25 origin_cache(
26 1 * 24 * 60 * 60, # 1 day
27 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
28 stale_if_error=5 * 24 * 60 * 60, # 5 days
29 ),
30 ],
31 )
32 def rss_updates(request):
33 request.response.content_type = "text/xml"
34
35 request.find_service(name="csp").merge(XML_CSP)
36
37 latest_releases = (
38 request.db.query(Release)
39 .options(joinedload(Release.project))
40 .order_by(Release.created.desc())
41 .limit(40)
42 .all()
43 )
44
45 return {"latest_releases": latest_releases}
46
47
48 @view_config(
49 route_name="rss.packages",
50 renderer="rss/packages.xml",
51 decorator=[
52 origin_cache(
53 1 * 24 * 60 * 60, # 1 day
54 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
55 stale_if_error=5 * 24 * 60 * 60, # 5 days
56 ),
57 ],
58 )
59 def rss_packages(request):
60 request.response.content_type = "text/xml"
61
62 request.find_service(name="csp").merge(XML_CSP)
63
64 newest_projects = (
65 request.db.query(Project)
66 .options(joinedload(Project.releases, innerjoin=True))
67 .order_by(Project.created.desc())
68 .limit(40)
69 .all()
70 )
71
72 return {"newest_projects": newest_projects}
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/rss/views.py b/warehouse/rss/views.py
--- a/warehouse/rss/views.py
+++ b/warehouse/rss/views.py
@@ -26,6 +26,7 @@
1 * 24 * 60 * 60, # 1 day
stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
stale_if_error=5 * 24 * 60 * 60, # 5 days
+ keys=["all-projects"],
),
],
)
@@ -53,6 +54,7 @@
1 * 24 * 60 * 60, # 1 day
stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
stale_if_error=5 * 24 * 60 * 60, # 5 days
+ keys=["all-projects"],
),
],
)
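
For context on what the added `keys=["all-projects"]` argument is doing: both feed views are cached at the edge for a day, and without a purge key the cached XML can keep being served long after new releases land, which is one plausible explanation for the mismatch with the uncached legacy feeds. The snippet below is only a minimal sketch of the surrogate-key idea (a fake in-memory CDN; the class, URLs and bodies are made up), not Warehouse's implementation:

```python
# Minimal sketch of surrogate-key purging (all names here are hypothetical).
class FakeCDN:
    def __init__(self):
        self.cache = {}  # url -> (body, surrogate keys)

    def store(self, url, body, keys):
        self.cache[url] = (body, set(keys))

    def purge_key(self, key):
        # Drop every cached response tagged with `key`.
        self.cache = {u: v for u, v in self.cache.items() if key not in v[1]}


cdn = FakeCDN()
cdn.store("/rss/updates.xml", "<rss>updates</rss>", keys=["all-projects"])
cdn.store("/rss/packages.xml", "<rss>packages</rss>", keys=["all-projects"])
cdn.purge_key("all-projects")  # a project changed: both feeds get regenerated together
assert cdn.cache == {}
```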
|
{"golden_diff": "diff --git a/warehouse/rss/views.py b/warehouse/rss/views.py\n--- a/warehouse/rss/views.py\n+++ b/warehouse/rss/views.py\n@@ -26,6 +26,7 @@\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n+ keys=[\"all-projects\"],\n ),\n ],\n )\n@@ -53,6 +54,7 @@\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n+ keys=[\"all-projects\"],\n ),\n ],\n )\n", "issue": "Legacy and Warehouse RSS feeds differ\n@andrew asked in https://github.com/librariesio/libraries.io/issues/2024#issuecomment-372638824 about Warehouse's RSS feeds:\r\n\r\n> * https://pypi.org/rss/updates.xml\r\n> * https://pypi.org/rss/packages.xml\r\n\r\n> Which I expected the contents to match the old ones but currently don't:\r\n\r\n> * https://pypi.python.org/pypi?%3Aaction=rss\r\n> * https://pypi.python.org/pypi?%3Aaction=packages_rss\r\n\r\nI've verified through visual inspection that the data in the legacy RSS feed and the data in the Warehouse RSS feeds differ.\r\n\r\nThis is a bug in the feeds or a bug in the docs. Currently our [feeds documentation](https://warehouse.readthedocs.io/api-reference/feeds/) and [Warehouse migration guide](https://warehouse.readthedocs.io/api-reference/integration-guide/) don't say anything about deliberate differences between the legacy and Warehouse RSS feeds. We can update the docs if there's a deliberate reason for the difference.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.xml import XML_CSP\n\n\n@view_config(\n route_name=\"rss.updates\",\n renderer=\"rss/updates.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_updates(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"latest_releases\": latest_releases}\n\n\n@view_config(\n route_name=\"rss.packages\",\n renderer=\"rss/packages.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_packages(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n newest_projects = (\n request.db.query(Project)\n .options(joinedload(Project.releases, innerjoin=True))\n .order_by(Project.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"newest_projects\": newest_projects}\n", "path": "warehouse/rss/views.py"}], "after_files": 
[{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.xml import XML_CSP\n\n\n@view_config(\n route_name=\"rss.updates\",\n renderer=\"rss/updates.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n keys=[\"all-projects\"],\n ),\n ],\n)\ndef rss_updates(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"latest_releases\": latest_releases}\n\n\n@view_config(\n route_name=\"rss.packages\",\n renderer=\"rss/packages.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n keys=[\"all-projects\"],\n ),\n ],\n)\ndef rss_packages(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n newest_projects = (\n request.db.query(Project)\n .options(joinedload(Project.releases, innerjoin=True))\n .order_by(Project.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"newest_projects\": newest_projects}\n", "path": "warehouse/rss/views.py"}]}
| 1,173 | 228 |
gh_patches_debug_34793
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-2571
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
azure - lower az log verbosity when running custodian commands without -v
Currently, c7n_azure will log info about the session it's using with normal c7n commands and is much more verbose than it used to be. I moved the log level from info to debug so it's still accessible with the -v flag:
```
$ custodian schema network-addr.filters.shield-enabled -v
2018-06-19 09:42:36,028: cli.azure.cli.core:DEBUG Current cloud config:
AzureCloud
2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Creating session with Azure CLI Authentication
2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Session using Subscription ID: xxxxxxxxxxxxxxxxxxxxxxxxxxx
Help
----
The most base type
Schema
------
{
"additionalProperties": false,
"required": [
"type"
],
"type": "object",
"properties": {
"state": {
"type": "boolean"
},
"type": {
"enum": [
"shield-enabled"
]
}
}
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_azure/c7n_azure/session.py`
Content:
```
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import importlib
16 import os
17 import logging
18 from azure.cli.core.cloud import AZURE_PUBLIC_CLOUD
19 from azure.cli.core._profile import Profile
20 from azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication
21 from c7n_azure.utils import ResourceIdParser
22
23
24 class Session(object):
25
26 def __init__(self, subscription_id=None):
27 """
28 Creates a session using available authentication type.
29
30 Auth priority:
31 1. Token Auth
32 2. Tenant Auth
33 3. Azure CLI Auth
34
35 :param subscription_id: If provided, overrides environment variables.
36 """
37
38 self.log = logging.getLogger('custodian.azure.session')
39 self._provider_cache = {}
40
41 tenant_auth_variables = [
42 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',
43 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'
44 ]
45 token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']
46
47 if all(k in os.environ for k in token_auth_variables):
48 # Token authentication
49 self.credentials = BasicTokenAuthentication(
50 token={
51 'access_token': os.environ['AZURE_ACCESS_TOKEN']
52 })
53 self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
54 self.log.info("Creating session with Token Authentication")
55
56 elif all(k in os.environ for k in tenant_auth_variables):
57 # Tenant (service principal) authentication
58 self.credentials = ServicePrincipalCredentials(
59 client_id=os.environ['AZURE_CLIENT_ID'],
60 secret=os.environ['AZURE_CLIENT_SECRET'],
61 tenant=os.environ['AZURE_TENANT_ID']
62 )
63 self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
64 self.tenant_id = os.environ['AZURE_TENANT_ID']
65 self.log.info("Creating session with Service Principal Authentication")
66
67 else:
68 # Azure CLI authentication
69 (self.credentials,
70 self.subscription_id,
71 self.tenant_id) = Profile().get_login_credentials(
72 resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id)
73 self.log.info("Creating session with Azure CLI Authentication")
74
75 # Let provided id parameter override everything else
76 if subscription_id is not None:
77 self.subscription_id = subscription_id
78
79 self.log.info("Session using Subscription ID: %s" % self.subscription_id)
80
81 if self.credentials is None:
82 self.log.error('Unable to locate credentials for Azure session.')
83
84 def client(self, client):
85 service_name, client_name = client.rsplit('.', 1)
86 svc_module = importlib.import_module(service_name)
87 klass = getattr(svc_module, client_name)
88 return klass(self.credentials, self.subscription_id)
89
90 def resource_api_version(self, resource_id):
91 """ latest non-preview api version for resource """
92
93 namespace = ResourceIdParser.get_namespace(resource_id)
94 resource_type = ResourceIdParser.get_resource_type(resource_id)
95
96 if resource_type in self._provider_cache:
97 return self._provider_cache[resource_type]
98
99 resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
100 provider = resource_client.providers.get(namespace)
101
102 rt = next((t for t in provider.resource_types
103 if t.resource_type == str(resource_type).split('/')[-1]), None)
104 if rt and rt.api_versions:
105 versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
106 api_version = versions[0] if versions else rt.api_versions[0]
107 self._provider_cache[resource_type] = api_version
108 return api_version
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/c7n_azure/c7n_azure/session.py b/tools/c7n_azure/c7n_azure/session.py
--- a/tools/c7n_azure/c7n_azure/session.py
+++ b/tools/c7n_azure/c7n_azure/session.py
@@ -24,6 +24,19 @@
class Session(object):
def __init__(self, subscription_id=None):
+ """
+ :param subscription_id: If provided overrides environment variables.
+
+ """
+
+ self.log = logging.getLogger('custodian.azure.session')
+ self._provider_cache = {}
+ self.subscription_id_override = subscription_id
+ self.credentials = None
+ self.subscription_id = None
+ self.tenant_id = None
+
+ def _initialize_session(self):
"""
Creates a session using available authentication type.
@@ -32,11 +45,11 @@
2. Tenant Auth
3. Azure CLI Auth
- :param subscription_id: If provided, overrides environment variables.
"""
- self.log = logging.getLogger('custodian.azure.session')
- self._provider_cache = {}
+ # Only run once
+ if self.credentials is not None:
+ return
tenant_auth_variables = [
'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',
@@ -73,8 +86,8 @@
self.log.info("Creating session with Azure CLI Authentication")
# Let provided id parameter override everything else
- if subscription_id is not None:
- self.subscription_id = subscription_id
+ if self.subscription_id_override is not None:
+ self.subscription_id = self.subscription_id_override
self.log.info("Session using Subscription ID: %s" % self.subscription_id)
@@ -82,6 +95,7 @@
self.log.error('Unable to locate credentials for Azure session.')
def client(self, client):
+ self._initialize_session()
service_name, client_name = client.rsplit('.', 1)
svc_module = importlib.import_module(service_name)
klass = getattr(svc_module, client_name)
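
The shape of this patch is plain lazy initialization: `__init__` becomes cheap and silent, and the credential lookup (plus its INFO logging) only happens once `client()` is actually called, so commands such as `custodian schema` that never touch Azure no longer produce those session log lines. A generic, runnable sketch of the same pattern (not the c7n_azure code):

```python
# Lazy initialization: defer the expensive/noisy work until first real use.
class LazyResource:
    def __init__(self):
        self._conn = None           # constructing the object does nothing expensive

    def _initialize(self):
        if self._conn is not None:  # only run once
            return
        print("authenticating...")  # stands in for credential lookup + logging
        self._conn = object()

    def use(self):
        self._initialize()          # first call pays the cost
        return self._conn


r = LazyResource()  # cheap and silent
r.use()             # prints "authenticating..." once
r.use()             # reuses the cached connection, no more output
```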
|
{"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/session.py b/tools/c7n_azure/c7n_azure/session.py\n--- a/tools/c7n_azure/c7n_azure/session.py\n+++ b/tools/c7n_azure/c7n_azure/session.py\n@@ -24,6 +24,19 @@\n class Session(object):\n \n def __init__(self, subscription_id=None):\n+ \"\"\"\n+ :param subscription_id: If provided overrides environment variables.\n+\n+ \"\"\"\n+\n+ self.log = logging.getLogger('custodian.azure.session')\n+ self._provider_cache = {}\n+ self.subscription_id_override = subscription_id\n+ self.credentials = None\n+ self.subscription_id = None\n+ self.tenant_id = None\n+\n+ def _initialize_session(self):\n \"\"\"\n Creates a session using available authentication type.\n \n@@ -32,11 +45,11 @@\n 2. Tenant Auth\n 3. Azure CLI Auth\n \n- :param subscription_id: If provided, overrides environment variables.\n \"\"\"\n \n- self.log = logging.getLogger('custodian.azure.session')\n- self._provider_cache = {}\n+ # Only run once\n+ if self.credentials is not None:\n+ return\n \n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n@@ -73,8 +86,8 @@\n self.log.info(\"Creating session with Azure CLI Authentication\")\n \n # Let provided id parameter override everything else\n- if subscription_id is not None:\n- self.subscription_id = subscription_id\n+ if self.subscription_id_override is not None:\n+ self.subscription_id = self.subscription_id_override\n \n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n \n@@ -82,6 +95,7 @@\n self.log.error('Unable to locate credentials for Azure session.')\n \n def client(self, client):\n+ self._initialize_session()\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n", "issue": "azure - lower az log verbosity when running custodian commands without -v\ncurrently, c7n_azure will log info about the session it's using with normal c7n commands and is much more verbose than what it used to be, moved the log level from info to debug so it's still accessible with the -v flag\r\n\r\n```\r\n$ custodian schema network-addr.filters.shield-enabled -v\r\n2018-06-19 09:42:36,028: cli.azure.cli.core:DEBUG Current cloud config:\r\nAzureCloud\r\n2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Creating session with Azure CLI Authentication\r\n2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Session using Subscription ID: xxxxxxxxxxxxxxxxxxxxxxxxxxx\r\n\r\nHelp\r\n----\r\n\r\nThe most base type\r\n\r\nSchema\r\n------\r\n\r\n{\r\n \"additionalProperties\": false,\r\n \"required\": [\r\n \"type\"\r\n ],\r\n \"type\": \"object\",\r\n \"properties\": {\r\n \"state\": {\r\n \"type\": \"boolean\"\r\n },\r\n \"type\": {\r\n \"enum\": [\r\n \"shield-enabled\"\r\n ]\r\n }\r\n }\r\n}\r\n```\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport os\nimport logging\nfrom azure.cli.core.cloud import 
AZURE_PUBLIC_CLOUD\nfrom azure.cli.core._profile import Profile\nfrom azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication\nfrom c7n_azure.utils import ResourceIdParser\n\n\nclass Session(object):\n\n def __init__(self, subscription_id=None):\n \"\"\"\n Creates a session using available authentication type.\n\n Auth priority:\n 1. Token Auth\n 2. Tenant Auth\n 3. Azure CLI Auth\n\n :param subscription_id: If provided, overrides environment variables.\n \"\"\"\n\n self.log = logging.getLogger('custodian.azure.session')\n self._provider_cache = {}\n\n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'\n ]\n token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']\n\n if all(k in os.environ for k in token_auth_variables):\n # Token authentication\n self.credentials = BasicTokenAuthentication(\n token={\n 'access_token': os.environ['AZURE_ACCESS_TOKEN']\n })\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.log.info(\"Creating session with Token Authentication\")\n\n elif all(k in os.environ for k in tenant_auth_variables):\n # Tenant (service principal) authentication\n self.credentials = ServicePrincipalCredentials(\n client_id=os.environ['AZURE_CLIENT_ID'],\n secret=os.environ['AZURE_CLIENT_SECRET'],\n tenant=os.environ['AZURE_TENANT_ID']\n )\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.tenant_id = os.environ['AZURE_TENANT_ID']\n self.log.info(\"Creating session with Service Principal Authentication\")\n\n else:\n # Azure CLI authentication\n (self.credentials,\n self.subscription_id,\n self.tenant_id) = Profile().get_login_credentials(\n resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id)\n self.log.info(\"Creating session with Azure CLI Authentication\")\n\n # Let provided id parameter override everything else\n if subscription_id is not None:\n self.subscription_id = subscription_id\n\n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n\n if self.credentials is None:\n self.log.error('Unable to locate credentials for Azure session.')\n\n def client(self, client):\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n return klass(self.credentials, self.subscription_id)\n\n def resource_api_version(self, resource_id):\n \"\"\" latest non-preview api version for resource \"\"\"\n\n namespace = ResourceIdParser.get_namespace(resource_id)\n resource_type = ResourceIdParser.get_resource_type(resource_id)\n\n if resource_type in self._provider_cache:\n return self._provider_cache[resource_type]\n\n resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')\n provider = resource_client.providers.get(namespace)\n\n rt = next((t for t in provider.resource_types\n if t.resource_type == str(resource_type).split('/')[-1]), None)\n if rt and rt.api_versions:\n versions = [v for v in rt.api_versions if 'preview' not in v.lower()]\n api_version = versions[0] if versions else rt.api_versions[0]\n self._provider_cache[resource_type] = api_version\n return api_version\n", "path": "tools/c7n_azure/c7n_azure/session.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport os\nimport logging\nfrom azure.cli.core.cloud import AZURE_PUBLIC_CLOUD\nfrom azure.cli.core._profile import Profile\nfrom azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication\nfrom c7n_azure.utils import ResourceIdParser\n\n\nclass Session(object):\n\n def __init__(self, subscription_id=None):\n \"\"\"\n :param subscription_id: If provided overrides environment variables.\n\n \"\"\"\n\n self.log = logging.getLogger('custodian.azure.session')\n self._provider_cache = {}\n self.subscription_id_override = subscription_id\n self.credentials = None\n self.subscription_id = None\n self.tenant_id = None\n\n def _initialize_session(self):\n \"\"\"\n Creates a session using available authentication type.\n\n Auth priority:\n 1. Token Auth\n 2. Tenant Auth\n 3. Azure CLI Auth\n\n \"\"\"\n\n # Only run once\n if self.credentials is not None:\n return\n\n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'\n ]\n token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']\n\n if all(k in os.environ for k in token_auth_variables):\n # Token authentication\n self.credentials = BasicTokenAuthentication(\n token={\n 'access_token': os.environ['AZURE_ACCESS_TOKEN']\n })\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.log.info(\"Creating session with Token Authentication\")\n\n elif all(k in os.environ for k in tenant_auth_variables):\n # Tenant (service principal) authentication\n self.credentials = ServicePrincipalCredentials(\n client_id=os.environ['AZURE_CLIENT_ID'],\n secret=os.environ['AZURE_CLIENT_SECRET'],\n tenant=os.environ['AZURE_TENANT_ID']\n )\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.tenant_id = os.environ['AZURE_TENANT_ID']\n self.log.info(\"Creating session with Service Principal Authentication\")\n\n else:\n # Azure CLI authentication\n (self.credentials,\n self.subscription_id,\n self.tenant_id) = Profile().get_login_credentials(\n resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id)\n self.log.info(\"Creating session with Azure CLI Authentication\")\n\n # Let provided id parameter override everything else\n if self.subscription_id_override is not None:\n self.subscription_id = self.subscription_id_override\n\n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n\n if self.credentials is None:\n self.log.error('Unable to locate credentials for Azure session.')\n\n def client(self, client):\n self._initialize_session()\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n return klass(self.credentials, self.subscription_id)\n\n def resource_api_version(self, resource_id):\n \"\"\" latest non-preview api version for resource \"\"\"\n\n namespace = ResourceIdParser.get_namespace(resource_id)\n resource_type = ResourceIdParser.get_resource_type(resource_id)\n\n if resource_type in self._provider_cache:\n return self._provider_cache[resource_type]\n\n resource_client = 
self.client('azure.mgmt.resource.ResourceManagementClient')\n provider = resource_client.providers.get(namespace)\n\n rt = next((t for t in provider.resource_types\n if t.resource_type == str(resource_type).split('/')[-1]), None)\n if rt and rt.api_versions:\n versions = [v for v in rt.api_versions if 'preview' not in v.lower()]\n api_version = versions[0] if versions else rt.api_versions[0]\n self._provider_cache[resource_type] = api_version\n return api_version\n", "path": "tools/c7n_azure/c7n_azure/session.py"}]}
| 1,649 | 469 |
gh_patches_debug_34983
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-3117
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Psycopg patching doesn't properly handle execute_values
The `execute_values` extension in psycopg2 composes and executes the query as a bytes string, even if you passed the query in as a `str`. Below is the full function from psycopg2.extras:
```python
def execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):
from psycopg2.sql import Composable
if isinstance(sql, Composable):
sql = sql.as_string(cur)
# we can't just use sql % vals because vals is bytes: if sql is bytes
# there will be some decoding error because of stupid codec used, and Py3
# doesn't implement % on bytes.
if not isinstance(sql, bytes):
sql = sql.encode(_ext.encodings[cur.connection.encoding])
pre, post = _split_sql(sql)
result = [] if fetch else None
for page in _paginate(argslist, page_size=page_size):
if template is None:
template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'
parts = pre[:]
for args in page:
parts.append(cur.mogrify(template, args))
parts.append(b',')
parts[-1:] = post
cur.execute(b''.join(parts))
if fetch:
result.extend(cur.fetchall())
return result
```
The problem is that ddtrace assumes that the "resource" added to a span is a string. The result is that when `span.finish()` is called in the datadog lambda handler and it tries to serialize the span to json, it blows up with "TypeError: Object of type bytes is not JSON serializable". Upon investigation, I discovered that the JSONEncoder in ddtrace.internal.encoding.py just does a simple json.dumps() on all the spans, and the `resource` attribute on the span from using `execute_values` is bytes, not a string.
I think the solution here is simply to update the Psycopg2TracedCursor class to decode the resource from bytes if it is bytes, like this:
```python
class Psycopg2TracedCursor(dbapi.TracedCursor):
"""TracedCursor for psycopg2"""
def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):
# treat psycopg2.sql.Composable resource objects as strings
if isinstance(resource, Composable):
resource = resource.as_string(self.__wrapped__)
# THIS IS THE NEW PART BELOW (next 2 lines)
if isinstance(resource, bytes):
resource = resource.decode('utf-8')
return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)
```
### Which version of dd-trace-py are you using?
Lambda layer, v50.
### Which version of pip are you using?
n/a
### How can we reproduce your problem?
Use `execute_values` while inside a tracing context. It should have a 100% failure rate.
### What is the result that you get?
A type error when span.finish() is called and the metrics are furnished to DD.
### What is the result that you expected?
It should work as normal, with the resource decoded as a string.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/internal/encoding.py`
Content:
```
1 import json
2 from typing import Any
3 from typing import Dict
4 from typing import List
5 from typing import Optional
6 from typing import TYPE_CHECKING
7
8 from ._encoding import ListStringTable
9 from ._encoding import MsgpackEncoderV03
10 from ._encoding import MsgpackEncoderV05
11 from .logger import get_logger
12
13
14 __all__ = ["MsgpackEncoderV03", "MsgpackEncoderV05", "ListStringTable", "MSGPACK_ENCODERS"]
15
16
17 if TYPE_CHECKING:
18 from ..span import Span
19
20
21 log = get_logger(__name__)
22
23
24 class _EncoderBase(object):
25 """
26 Encoder interface that provides the logic to encode traces and service.
27 """
28
29 def encode_traces(self, traces):
30 # type: (List[List[Span]]) -> str
31 """
32 Encodes a list of traces, expecting a list of items where each items
33 is a list of spans. Before dumping the string in a serialized format all
34 traces are normalized according to the encoding format. The trace
35 nesting is not changed.
36
37 :param traces: A list of traces that should be serialized
38 """
39 raise NotImplementedError()
40
41 def encode(self, obj):
42 # type: (List[List[Any]]) -> str
43 """
44 Defines the underlying format used during traces or services encoding.
45 This method must be implemented and should only be used by the internal
46 functions.
47 """
48 raise NotImplementedError()
49
50
51 class JSONEncoder(_EncoderBase):
52 content_type = "application/json"
53
54 def encode_traces(self, traces):
55 normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
56 return self.encode(normalized_traces)
57
58 @staticmethod
59 def encode(obj):
60 # type: (Any) -> str
61 return json.dumps(obj)
62
63
64 class JSONEncoderV2(JSONEncoder):
65 """
66 JSONEncoderV2 encodes traces to the new intake API format.
67 """
68
69 content_type = "application/json"
70
71 def encode_traces(self, traces):
72 # type: (List[List[Span]]) -> str
73 normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]
74 return self.encode({"traces": normalized_traces})
75
76 @staticmethod
77 def _convert_span(span):
78 # type: (Span) -> Dict[str, Any]
79 sp = span.to_dict()
80 sp["trace_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("trace_id"))
81 sp["parent_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("parent_id"))
82 sp["span_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("span_id"))
83 return sp
84
85 @staticmethod
86 def _encode_id_to_hex(dd_id):
87 # type: (Optional[int]) -> str
88 if not dd_id:
89 return "0000000000000000"
90 return "%0.16X" % int(dd_id)
91
92 @staticmethod
93 def _decode_id_to_hex(hex_id):
94 # type: (Optional[str]) -> int
95 if not hex_id:
96 return 0
97 return int(hex_id, 16)
98
99
100 MSGPACK_ENCODERS = {
101 "v0.3": MsgpackEncoderV03,
102 "v0.4": MsgpackEncoderV03,
103 "v0.5": MsgpackEncoderV05,
104 }
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/internal/encoding.py b/ddtrace/internal/encoding.py
--- a/ddtrace/internal/encoding.py
+++ b/ddtrace/internal/encoding.py
@@ -8,6 +8,9 @@
from ._encoding import ListStringTable
from ._encoding import MsgpackEncoderV03
from ._encoding import MsgpackEncoderV05
+from .compat import PY3
+from .compat import binary_type
+from .compat import ensure_text
from .logger import get_logger
@@ -48,17 +51,33 @@
raise NotImplementedError()
-class JSONEncoder(_EncoderBase):
+class JSONEncoder(json.JSONEncoder, _EncoderBase):
content_type = "application/json"
def encode_traces(self, traces):
- normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
+ normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]
return self.encode(normalized_traces)
@staticmethod
- def encode(obj):
- # type: (Any) -> str
- return json.dumps(obj)
+ def _normalize_span(span):
+ # Ensure all string attributes are actually strings and not bytes
+ # DEV: We are deferring meta/metrics to reduce any performance issues.
+ # Meta/metrics may still contain `bytes` and have encoding issues.
+ span["resource"] = JSONEncoder._normalize_str(span["resource"])
+ span["name"] = JSONEncoder._normalize_str(span["name"])
+ span["service"] = JSONEncoder._normalize_str(span["service"])
+ return span
+
+ @staticmethod
+ def _normalize_str(obj):
+ if obj is None:
+ return obj
+
+ if PY3:
+ return ensure_text(obj, errors="backslashreplace")
+ elif isinstance(obj, binary_type):
+ return obj.decode("utf-8", errors="replace")
+ return obj
class JSONEncoderV2(JSONEncoder):
@@ -77,6 +96,7 @@
def _convert_span(span):
# type: (Span) -> Dict[str, Any]
sp = span.to_dict()
+ sp = JSONEncoderV2._normalize_span(sp)
sp["trace_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("trace_id"))
sp["parent_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("parent_id"))
sp["span_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("span_id"))
|
{"golden_diff": "diff --git a/ddtrace/internal/encoding.py b/ddtrace/internal/encoding.py\n--- a/ddtrace/internal/encoding.py\n+++ b/ddtrace/internal/encoding.py\n@@ -8,6 +8,9 @@\n from ._encoding import ListStringTable\n from ._encoding import MsgpackEncoderV03\n from ._encoding import MsgpackEncoderV05\n+from .compat import PY3\n+from .compat import binary_type\n+from .compat import ensure_text\n from .logger import get_logger\n \n \n@@ -48,17 +51,33 @@\n raise NotImplementedError()\n \n \n-class JSONEncoder(_EncoderBase):\n+class JSONEncoder(json.JSONEncoder, _EncoderBase):\n content_type = \"application/json\"\n \n def encode_traces(self, traces):\n- normalized_traces = [[span.to_dict() for span in trace] for trace in traces]\n+ normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n \n @staticmethod\n- def encode(obj):\n- # type: (Any) -> str\n- return json.dumps(obj)\n+ def _normalize_span(span):\n+ # Ensure all string attributes are actually strings and not bytes\n+ # DEV: We are deferring meta/metrics to reduce any performance issues.\n+ # Meta/metrics may still contain `bytes` and have encoding issues.\n+ span[\"resource\"] = JSONEncoder._normalize_str(span[\"resource\"])\n+ span[\"name\"] = JSONEncoder._normalize_str(span[\"name\"])\n+ span[\"service\"] = JSONEncoder._normalize_str(span[\"service\"])\n+ return span\n+\n+ @staticmethod\n+ def _normalize_str(obj):\n+ if obj is None:\n+ return obj\n+\n+ if PY3:\n+ return ensure_text(obj, errors=\"backslashreplace\")\n+ elif isinstance(obj, binary_type):\n+ return obj.decode(\"utf-8\", errors=\"replace\")\n+ return obj\n \n \n class JSONEncoderV2(JSONEncoder):\n@@ -77,6 +96,7 @@\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n+ sp = JSONEncoderV2._normalize_span(sp)\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n", "issue": "Psycopg patching doesn't properly handle execute_values\nThe `execute_values` extension in psycopg2 composes and executes the query with b-string, even if you passed the query as a string. Below is the full function from psycopg2.extras\r\n\r\n```python\r\ndef execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):\r\n from psycopg2.sql import Composable\r\n if isinstance(sql, Composable):\r\n sql = sql.as_string(cur)\r\n\r\n # we can't just use sql % vals because vals is bytes: if sql is bytes\r\n # there will be some decoding error because of stupid codec used, and Py3\r\n # doesn't implement % on bytes.\r\n if not isinstance(sql, bytes):\r\n sql = sql.encode(_ext.encodings[cur.connection.encoding])\r\n pre, post = _split_sql(sql)\r\n\r\n result = [] if fetch else None\r\n for page in _paginate(argslist, page_size=page_size):\r\n if template is None:\r\n template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'\r\n parts = pre[:]\r\n for args in page:\r\n parts.append(cur.mogrify(template, args))\r\n parts.append(b',')\r\n parts[-1:] = post\r\n cur.execute(b''.join(parts))\r\n if fetch:\r\n result.extend(cur.fetchall())\r\n\r\n return result\r\n```\r\n\r\nThe problem is that ddtrace assumes that the \"resource\" added to a span is a string. 
The result is that when `span.finish()` is called in the datadog lambda handler and it tries to serialize the span to json, it blows up with \"TypeError: Object of type bytes is not JSON serializable\". Upon investigation, I discovered that the ddtrace.internal.encoder.py's JSONEncoder just does a simple json.dumps() on all the spans and the `resource` attribute on the span from the using `execute_values` is bytes, not a string.\r\n\r\nI think the solution here is simply to update the Psycopg2TracedCursor class to decode the resource from bytes if it is bytes, like this:\r\n\r\n```python\r\nclass Psycopg2TracedCursor(dbapi.TracedCursor):\r\n \"\"\"TracedCursor for psycopg2\"\"\"\r\n\r\n def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):\r\n # treat psycopg2.sql.Composable resource objects as strings\r\n if isinstance(resource, Composable):\r\n resource = resource.as_string(self.__wrapped__)\r\n # THIS IS THE NEW PART BELOW (next 2 lines)\r\n if isinstance(resource, bytes):\r\n resource = resource.decode('utf-8')\r\n return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\nLambda layer, v50.\r\n### Which version of pip are you using?\r\nn/a\r\n\r\n### How can we reproduce your problem?\r\nUse `execute_values` while inside a tracing context. It should have a 100% failure rate.\r\n\r\n### What is the result that you get?\r\nA type error when span.finish() is called and the metrics are furnished to DD.\r\n\r\n### What is the result that you expected?\r\nIt should work as normal, with the resource decoded as a string.\r\n\n", "before_files": [{"content": "import json\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom ._encoding import ListStringTable\nfrom ._encoding import MsgpackEncoderV03\nfrom ._encoding import MsgpackEncoderV05\nfrom .logger import get_logger\n\n\n__all__ = [\"MsgpackEncoderV03\", \"MsgpackEncoderV05\", \"ListStringTable\", \"MSGPACK_ENCODERS\"]\n\n\nif TYPE_CHECKING:\n from ..span import Span\n\n\nlog = get_logger(__name__)\n\n\nclass _EncoderBase(object):\n \"\"\"\n Encoder interface that provides the logic to encode traces and service.\n \"\"\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n \"\"\"\n Encodes a list of traces, expecting a list of items where each items\n is a list of spans. Before dumping the string in a serialized format all\n traces are normalized according to the encoding format. 
The trace\n nesting is not changed.\n\n :param traces: A list of traces that should be serialized\n \"\"\"\n raise NotImplementedError()\n\n def encode(self, obj):\n # type: (List[List[Any]]) -> str\n \"\"\"\n Defines the underlying format used during traces or services encoding.\n This method must be implemented and should only be used by the internal\n functions.\n \"\"\"\n raise NotImplementedError()\n\n\nclass JSONEncoder(_EncoderBase):\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n normalized_traces = [[span.to_dict() for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n\n @staticmethod\n def encode(obj):\n # type: (Any) -> str\n return json.dumps(obj)\n\n\nclass JSONEncoderV2(JSONEncoder):\n \"\"\"\n JSONEncoderV2 encodes traces to the new intake API format.\n \"\"\"\n\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]\n return self.encode({\"traces\": normalized_traces})\n\n @staticmethod\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n return sp\n\n @staticmethod\n def _encode_id_to_hex(dd_id):\n # type: (Optional[int]) -> str\n if not dd_id:\n return \"0000000000000000\"\n return \"%0.16X\" % int(dd_id)\n\n @staticmethod\n def _decode_id_to_hex(hex_id):\n # type: (Optional[str]) -> int\n if not hex_id:\n return 0\n return int(hex_id, 16)\n\n\nMSGPACK_ENCODERS = {\n \"v0.3\": MsgpackEncoderV03,\n \"v0.4\": MsgpackEncoderV03,\n \"v0.5\": MsgpackEncoderV05,\n}\n", "path": "ddtrace/internal/encoding.py"}], "after_files": [{"content": "import json\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom ._encoding import ListStringTable\nfrom ._encoding import MsgpackEncoderV03\nfrom ._encoding import MsgpackEncoderV05\nfrom .compat import PY3\nfrom .compat import binary_type\nfrom .compat import ensure_text\nfrom .logger import get_logger\n\n\n__all__ = [\"MsgpackEncoderV03\", \"MsgpackEncoderV05\", \"ListStringTable\", \"MSGPACK_ENCODERS\"]\n\n\nif TYPE_CHECKING:\n from ..span import Span\n\n\nlog = get_logger(__name__)\n\n\nclass _EncoderBase(object):\n \"\"\"\n Encoder interface that provides the logic to encode traces and service.\n \"\"\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n \"\"\"\n Encodes a list of traces, expecting a list of items where each items\n is a list of spans. Before dumping the string in a serialized format all\n traces are normalized according to the encoding format. 
The trace\n nesting is not changed.\n\n :param traces: A list of traces that should be serialized\n \"\"\"\n raise NotImplementedError()\n\n def encode(self, obj):\n # type: (List[List[Any]]) -> str\n \"\"\"\n Defines the underlying format used during traces or services encoding.\n This method must be implemented and should only be used by the internal\n functions.\n \"\"\"\n raise NotImplementedError()\n\n\nclass JSONEncoder(json.JSONEncoder, _EncoderBase):\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n\n @staticmethod\n def _normalize_span(span):\n # Ensure all string attributes are actually strings and not bytes\n # DEV: We are deferring meta/metrics to reduce any performance issues.\n # Meta/metrics may still contain `bytes` and have encoding issues.\n span[\"resource\"] = JSONEncoder._normalize_str(span[\"resource\"])\n span[\"name\"] = JSONEncoder._normalize_str(span[\"name\"])\n span[\"service\"] = JSONEncoder._normalize_str(span[\"service\"])\n return span\n\n @staticmethod\n def _normalize_str(obj):\n if obj is None:\n return obj\n\n if PY3:\n return ensure_text(obj, errors=\"backslashreplace\")\n elif isinstance(obj, binary_type):\n return obj.decode(\"utf-8\", errors=\"replace\")\n return obj\n\n\nclass JSONEncoderV2(JSONEncoder):\n \"\"\"\n JSONEncoderV2 encodes traces to the new intake API format.\n \"\"\"\n\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]\n return self.encode({\"traces\": normalized_traces})\n\n @staticmethod\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n sp = JSONEncoderV2._normalize_span(sp)\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n return sp\n\n @staticmethod\n def _encode_id_to_hex(dd_id):\n # type: (Optional[int]) -> str\n if not dd_id:\n return \"0000000000000000\"\n return \"%0.16X\" % int(dd_id)\n\n @staticmethod\n def _decode_id_to_hex(hex_id):\n # type: (Optional[str]) -> int\n if not hex_id:\n return 0\n return int(hex_id, 16)\n\n\nMSGPACK_ENCODERS = {\n \"v0.3\": MsgpackEncoderV03,\n \"v0.4\": MsgpackEncoderV03,\n \"v0.5\": MsgpackEncoderV05,\n}\n", "path": "ddtrace/internal/encoding.py"}]}
| 1,917 | 561 |
gh_patches_debug_27090
|
rasdani/github-patches
|
git_diff
|
data-for-change__anyway-720
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clusters view still very heavy
I uploaded the code to a new server, following #463.
The basic markers view is better and faster, but the clusters view is still heavy and might cause server failure (I think the query is hogging the DB).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `clusters_calculator.py`
Content:
```
1 from models import Marker
2 from static.pymapcluster import calculate_clusters
3 import logging
4 import concurrent.futures
5 import multiprocessing
6
7
8 def retrieve_clusters(**kwargs):
9 marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
10 result_futures = []
11 logging.info('number of cores: ' + str(multiprocessing.cpu_count()))
12 with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
13 for marker_box in marker_boxes:
14
15 kwargs.update(marker_box)
16 markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()
17 result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))
18
19 completed_futures = concurrent.futures.wait(result_futures)
20 result = []
21 for future in completed_futures.done:
22 result.extend(future.result())
23
24 return result
25
26
27 def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
28 cpu_count = multiprocessing.cpu_count()
29 lat_box_size = (ne_lat - sw_lat) / cpu_count
30 # lng_box_size = (sw_lng - ne_lng) / cpu_count
31 boxes = []
32 for i in xrange(cpu_count):
33 # TODO: the below calculation is using sw_lat as first param instead of ne_lat. Plz verify my fix for that:
34 # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng))
35 boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,
36 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})
37
38 return boxes
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/clusters_calculator.py b/clusters_calculator.py
--- a/clusters_calculator.py
+++ b/clusters_calculator.py
@@ -1,27 +1,25 @@
+import itertools
+from celery import Celery, group
from models import Marker
from static.pymapcluster import calculate_clusters
-import logging
-import concurrent.futures
import multiprocessing
-def retrieve_clusters(**kwargs):
- marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
- result_futures = []
- logging.info('number of cores: ' + str(multiprocessing.cpu_count()))
- with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
- for marker_box in marker_boxes:
+celery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//')
- kwargs.update(marker_box)
- markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()
- result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))
+@celery_app.task
+def calculate_marker_box(kwargs, marker_box):
+ kwargs.update(marker_box)
+ markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()
+ return calculate_clusters(markers_in_box, kwargs['zoom'])
- completed_futures = concurrent.futures.wait(result_futures)
- result = []
- for future in completed_futures.done:
- result.extend(future.result())
- return result
+def retrieve_clusters(**kwargs):
+ marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
+ job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes])
+ result = job.apply_async()
+ result.join()
+ return list(itertools.chain.from_iterable(result.get()))
def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
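
With this change the web process only fans the bounding boxes out to Celery and collects the cluster lists back, so the heavy marker queries run in worker processes instead of in the request handler; it does assume a reachable broker (the URL above points at a local RabbitMQ) and at least one running worker that imports this module. A hypothetical usage sketch under those assumptions (coordinates and zoom are made up, and any extra filter kwargs accepted by `Marker.bounding_box_query` would be passed through as well):

```python
# Hypothetical usage; assumes a RabbitMQ broker at pyamqp://guest@localhost//
# and a Celery worker that has imported clusters_calculator.
from clusters_calculator import retrieve_clusters

clusters = retrieve_clusters(
    ne_lat=32.15, ne_lng=34.95,  # north-east corner of the viewport
    sw_lat=31.95, sw_lng=34.75,  # south-west corner
    zoom=13,                     # forwarded to calculate_clusters
)
print(len(clusters))
```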
|
{"golden_diff": "diff --git a/clusters_calculator.py b/clusters_calculator.py\n--- a/clusters_calculator.py\n+++ b/clusters_calculator.py\n@@ -1,27 +1,25 @@\n+import itertools\n+from celery import Celery, group\n from models import Marker\n from static.pymapcluster import calculate_clusters\n-import logging\n-import concurrent.futures\n import multiprocessing\n \n \n-def retrieve_clusters(**kwargs):\n- marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n- result_futures = []\n- logging.info('number of cores: ' + str(multiprocessing.cpu_count()))\n- with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n- for marker_box in marker_boxes:\n+celery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//')\n \n- kwargs.update(marker_box)\n- markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n- result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))\n+@celery_app.task\n+def calculate_marker_box(kwargs, marker_box):\n+ kwargs.update(marker_box)\n+ markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n+ return calculate_clusters(markers_in_box, kwargs['zoom'])\n \n- completed_futures = concurrent.futures.wait(result_futures)\n- result = []\n- for future in completed_futures.done:\n- result.extend(future.result())\n \n- return result\n+def retrieve_clusters(**kwargs):\n+ marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n+ job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes])\n+ result = job.apply_async()\n+ result.join()\n+ return list(itertools.chain.from_iterable(result.get()))\n \n \n def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n", "issue": "Clusters view still very heavy\nI uploaded the code to a new server, following #463.\r\nThe basic markers view is better and faster, but the clusters is still heavy and might cause server failure (I think the query is hogging the DB).\n", "before_files": [{"content": "from models import Marker\nfrom static.pymapcluster import calculate_clusters\nimport logging\nimport concurrent.futures\nimport multiprocessing\n\n\ndef retrieve_clusters(**kwargs):\n marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n result_futures = []\n logging.info('number of cores: ' + str(multiprocessing.cpu_count()))\n with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n for marker_box in marker_boxes:\n\n kwargs.update(marker_box)\n markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))\n\n completed_futures = concurrent.futures.wait(result_futures)\n result = []\n for future in completed_futures.done:\n result.extend(future.result())\n\n return result\n\n\ndef divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n cpu_count = multiprocessing.cpu_count()\n lat_box_size = (ne_lat - sw_lat) / cpu_count\n # lng_box_size = (sw_lng - ne_lng) / cpu_count\n boxes = []\n for i in xrange(cpu_count):\n # TODO: the below calculation is using sw_lat as first param instead of ne_lat. 
Plz verify my fix for that:\n # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng))\n boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,\n 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})\n\n return boxes\n", "path": "clusters_calculator.py"}], "after_files": [{"content": "import itertools\nfrom celery import Celery, group\nfrom models import Marker\nfrom static.pymapcluster import calculate_clusters\nimport multiprocessing\n\n\ncelery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//')\n\n@celery_app.task\ndef calculate_marker_box(kwargs, marker_box):\n kwargs.update(marker_box)\n markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n return calculate_clusters(markers_in_box, kwargs['zoom'])\n\n\ndef retrieve_clusters(**kwargs):\n marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes])\n result = job.apply_async()\n result.join()\n return list(itertools.chain.from_iterable(result.get()))\n\n\ndef divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n cpu_count = multiprocessing.cpu_count()\n lat_box_size = (ne_lat - sw_lat) / cpu_count\n # lng_box_size = (sw_lng - ne_lng) / cpu_count\n boxes = []\n for i in xrange(cpu_count):\n # TODO: the below calculation is using sw_lat as first param instead of ne_lat. Plz verify my fix for that:\n # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng))\n boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,\n 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})\n\n return boxes\n", "path": "clusters_calculator.py"}]}
| 758 | 443 |
gh_patches_debug_4555
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-9157
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[sdk] Containerized Python Component module not found error
There is a bug when building a containerized Python component that occurs (at least) when the longest path of the import graph ending at the component involves more than two modules.
### Environment
KFP SDK 2.0.0-beta.6
### Steps to reproduce
For example:
```python
# component.py
from module_one import one
from kfp import dsl
@dsl.component
def comp(): ...
```
```python
# module_one.py
from module_two import two
one = 1
```
```python
# module_two.py
two = 2
```
Then: `kfp component build .`
You get a `No module named` error.
### Expected result
Should build without an error.
### Materials and Reference
Related: https://github.com/kubeflow/pipelines/issues/8353
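
For what it's worth, the error shape is easy to reproduce with the dynamic-import helper involved here (`load_module` in `sdk/python/kfp/components/utils.py`, shown below): imports executed by a module loaded via `importlib.util.spec_from_file_location` are resolved against `sys.path`, not against the loaded file's own directory. The snippet below is a standalone sketch of that mechanism only (temporary files, made-up module names); the exact path `kfp component build` takes may differ:

```python
import importlib.util
import os
import sys
import tempfile

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "module_two.py"), "w") as f:
    f.write("two = 2\n")
with open(os.path.join(tmp, "module_one.py"), "w") as f:
    f.write("from module_two import two\none = 1\n")

spec = importlib.util.spec_from_file_location("module_one", os.path.join(tmp, "module_one.py"))
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
try:
    spec.loader.exec_module(module)  # raises unless tmp is on sys.path
except ModuleNotFoundError as exc:
    print(exc)  # No module named 'module_two'
```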
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/components/utils.py`
Content:
```
1 # Copyright 2021 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Definitions of utils methods."""
15
16 import importlib
17 import os
18 import re
19 import sys
20 import types
21 from typing import List
22
23 _COMPONENT_NAME_PREFIX = 'comp-'
24 _EXECUTOR_LABEL_PREFIX = 'exec-'
25
26
27 def load_module(module_name: str, module_directory: str) -> types.ModuleType:
28 """Dynamically imports the Python module with the given name and package
29 path.
30
31 E.g., Assuming there is a file called `my_module.py` under
32 `/some/directory/my_module`, we can use::
33
34 load_module('my_module', '/some/directory')
35
36 to effectively `import mymodule`.
37
38 Args:
39 module_name: The name of the module.
40 package_path: The package under which the specified module resides.
41 """
42 module_spec = importlib.util.spec_from_file_location(
43 name=module_name,
44 location=os.path.join(module_directory, f'{module_name}.py'))
45 module = importlib.util.module_from_spec(module_spec)
46 sys.modules[module_spec.name] = module
47 module_spec.loader.exec_module(module)
48 return module
49
50
51 def maybe_rename_for_k8s(name: str) -> str:
52 """Cleans and converts a name to be k8s compatible.
53
54 Args:
55 name: The original name.
56
57 Returns:
58 A sanitized name.
59 """
60 return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-',
61 name.lower())).lstrip('-').rstrip('-')
62
63
64 def sanitize_input_name(name: str) -> str:
65 """Sanitizes input name."""
66 return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')
67
68
69 def sanitize_component_name(name: str) -> str:
70 """Sanitizes component name."""
71 return _COMPONENT_NAME_PREFIX + maybe_rename_for_k8s(name)
72
73
74 def sanitize_task_name(name: str) -> str:
75 """Sanitizes task name."""
76 return maybe_rename_for_k8s(name)
77
78
79 def sanitize_executor_label(label: str) -> str:
80 """Sanitizes executor label."""
81 return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label)
82
83
84 def make_name_unique_by_adding_index(
85 name: str,
86 collection: List[str],
87 delimiter: str,
88 ) -> str:
89 """Makes a unique name by adding index.
90
91 The index starts from 2 and increase by 1 until we find a unique name.
92
93 Args:
94 name: The original name.
95 collection: The collection of existing names.
96 delimiter: The delimiter to connect the original name and an index.
97
98 Returns:
99 A unique name composed of name+delimiter+next index
100 """
101 unique_name = name
102 if unique_name in collection:
103 for i in range(2, sys.maxsize**10):
104 unique_name = name + delimiter + str(i)
105 if unique_name not in collection:
106 break
107 return unique_name
108
109
110 def validate_pipeline_name(name: str) -> None:
111 """Validate pipeline name.
112
113 A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$.
114
115 Args:
116 name: The pipeline name.
117
118 Raises:
119 ValueError if the pipeline name doesn't conform to the regular expression.
120 """
121 pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$')
122 if not pattern.match(name):
123 raise ValueError(
124 'Invalid pipeline name: %s.\n'
125 'Please specify a pipeline name that matches the regular '
126 'expression "^[a-z0-9][a-z0-9-]{0,127}$" using '
127 '`dsl.pipeline(name=...)` decorator.' % name)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/kfp/components/utils.py b/sdk/python/kfp/components/utils.py
--- a/sdk/python/kfp/components/utils.py
+++ b/sdk/python/kfp/components/utils.py
@@ -44,6 +44,7 @@
location=os.path.join(module_directory, f'{module_name}.py'))
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_spec.name] = module
+ sys.path.insert(0, str(module_directory))
module_spec.loader.exec_module(module)
return module
|
{"golden_diff": "diff --git a/sdk/python/kfp/components/utils.py b/sdk/python/kfp/components/utils.py\n--- a/sdk/python/kfp/components/utils.py\n+++ b/sdk/python/kfp/components/utils.py\n@@ -44,6 +44,7 @@\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n+ sys.path.insert(0, str(module_directory))\n module_spec.loader.exec_module(module)\n return module\n", "issue": "[sdk] Containerized Python Component module not found error \nThere is a bug when building a containerized Python component that happens (at least) in the case when the longest path of the import graph ending at the component involves >2 modules. \r\n\r\n### Environment\r\nKFP SDK 2.0.0-beta.6\r\n\r\n### Steps to reproduce\r\nFor example:\r\n\r\n```python\r\n# component.py\r\nfrom module_one import one\r\nfrom kfp import dsl\r\n\r\[email protected]\r\ndef comp(): ...\r\n```\r\n\r\n```python\r\n# module_one.py\r\nfrom module_two import two\r\none = 1\r\n```\r\n\r\n```python\r\n# module_two.py\r\ntwo = 2\r\n```\r\n\r\nThen: `kfp component build .`\r\n\r\nYou get a `No module named` error.\r\n\r\n### Expected result\r\n\r\nShould build without an error.\r\n\r\n### Materials and Reference\r\nRelated: https://github.com/kubeflow/pipelines/issues/8353\n", "before_files": [{"content": "# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Definitions of utils methods.\"\"\"\n\nimport importlib\nimport os\nimport re\nimport sys\nimport types\nfrom typing import List\n\n_COMPONENT_NAME_PREFIX = 'comp-'\n_EXECUTOR_LABEL_PREFIX = 'exec-'\n\n\ndef load_module(module_name: str, module_directory: str) -> types.ModuleType:\n \"\"\"Dynamically imports the Python module with the given name and package\n path.\n\n E.g., Assuming there is a file called `my_module.py` under\n `/some/directory/my_module`, we can use::\n\n load_module('my_module', '/some/directory')\n\n to effectively `import mymodule`.\n\n Args:\n module_name: The name of the module.\n package_path: The package under which the specified module resides.\n \"\"\"\n module_spec = importlib.util.spec_from_file_location(\n name=module_name,\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n module_spec.loader.exec_module(module)\n return module\n\n\ndef maybe_rename_for_k8s(name: str) -> str:\n \"\"\"Cleans and converts a name to be k8s compatible.\n\n Args:\n name: The original name.\n\n Returns:\n A sanitized name.\n \"\"\"\n return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-',\n name.lower())).lstrip('-').rstrip('-')\n\n\ndef sanitize_input_name(name: str) -> str:\n \"\"\"Sanitizes input name.\"\"\"\n return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')\n\n\ndef sanitize_component_name(name: str) -> str:\n \"\"\"Sanitizes component name.\"\"\"\n return _COMPONENT_NAME_PREFIX + 
maybe_rename_for_k8s(name)\n\n\ndef sanitize_task_name(name: str) -> str:\n \"\"\"Sanitizes task name.\"\"\"\n return maybe_rename_for_k8s(name)\n\n\ndef sanitize_executor_label(label: str) -> str:\n \"\"\"Sanitizes executor label.\"\"\"\n return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label)\n\n\ndef make_name_unique_by_adding_index(\n name: str,\n collection: List[str],\n delimiter: str,\n) -> str:\n \"\"\"Makes a unique name by adding index.\n\n The index starts from 2 and increase by 1 until we find a unique name.\n\n Args:\n name: The original name.\n collection: The collection of existing names.\n delimiter: The delimiter to connect the original name and an index.\n\n Returns:\n A unique name composed of name+delimiter+next index\n \"\"\"\n unique_name = name\n if unique_name in collection:\n for i in range(2, sys.maxsize**10):\n unique_name = name + delimiter + str(i)\n if unique_name not in collection:\n break\n return unique_name\n\n\ndef validate_pipeline_name(name: str) -> None:\n \"\"\"Validate pipeline name.\n\n A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$.\n\n Args:\n name: The pipeline name.\n\n Raises:\n ValueError if the pipeline name doesn't conform to the regular expression.\n \"\"\"\n pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$')\n if not pattern.match(name):\n raise ValueError(\n 'Invalid pipeline name: %s.\\n'\n 'Please specify a pipeline name that matches the regular '\n 'expression \"^[a-z0-9][a-z0-9-]{0,127}$\" using '\n '`dsl.pipeline(name=...)` decorator.' % name)\n", "path": "sdk/python/kfp/components/utils.py"}], "after_files": [{"content": "# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Definitions of utils methods.\"\"\"\n\nimport importlib\nimport os\nimport re\nimport sys\nimport types\nfrom typing import List\n\n_COMPONENT_NAME_PREFIX = 'comp-'\n_EXECUTOR_LABEL_PREFIX = 'exec-'\n\n\ndef load_module(module_name: str, module_directory: str) -> types.ModuleType:\n \"\"\"Dynamically imports the Python module with the given name and package\n path.\n\n E.g., Assuming there is a file called `my_module.py` under\n `/some/directory/my_module`, we can use::\n\n load_module('my_module', '/some/directory')\n\n to effectively `import mymodule`.\n\n Args:\n module_name: The name of the module.\n package_path: The package under which the specified module resides.\n \"\"\"\n module_spec = importlib.util.spec_from_file_location(\n name=module_name,\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n sys.path.insert(0, str(module_directory))\n module_spec.loader.exec_module(module)\n return module\n\n\ndef maybe_rename_for_k8s(name: str) -> str:\n \"\"\"Cleans and converts a name to be k8s compatible.\n\n Args:\n name: The original name.\n\n Returns:\n A sanitized name.\n \"\"\"\n return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-',\n name.lower())).lstrip('-').rstrip('-')\n\n\ndef 
sanitize_input_name(name: str) -> str:\n \"\"\"Sanitizes input name.\"\"\"\n return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')\n\n\ndef sanitize_component_name(name: str) -> str:\n \"\"\"Sanitizes component name.\"\"\"\n return _COMPONENT_NAME_PREFIX + maybe_rename_for_k8s(name)\n\n\ndef sanitize_task_name(name: str) -> str:\n \"\"\"Sanitizes task name.\"\"\"\n return maybe_rename_for_k8s(name)\n\n\ndef sanitize_executor_label(label: str) -> str:\n \"\"\"Sanitizes executor label.\"\"\"\n return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label)\n\n\ndef make_name_unique_by_adding_index(\n name: str,\n collection: List[str],\n delimiter: str,\n) -> str:\n \"\"\"Makes a unique name by adding index.\n\n The index starts from 2 and increase by 1 until we find a unique name.\n\n Args:\n name: The original name.\n collection: The collection of existing names.\n delimiter: The delimiter to connect the original name and an index.\n\n Returns:\n A unique name composed of name+delimiter+next index\n \"\"\"\n unique_name = name\n if unique_name in collection:\n for i in range(2, sys.maxsize**10):\n unique_name = name + delimiter + str(i)\n if unique_name not in collection:\n break\n return unique_name\n\n\ndef validate_pipeline_name(name: str) -> None:\n \"\"\"Validate pipeline name.\n\n A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$.\n\n Args:\n name: The pipeline name.\n\n Raises:\n ValueError if the pipeline name doesn't conform to the regular expression.\n \"\"\"\n pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$')\n if not pattern.match(name):\n raise ValueError(\n 'Invalid pipeline name: %s.\\n'\n 'Please specify a pipeline name that matches the regular '\n 'expression \"^[a-z0-9][a-z0-9-]{0,127}$\" using '\n '`dsl.pipeline(name=...)` decorator.' % name)\n", "path": "sdk/python/kfp/components/utils.py"}]}
| 1,687 | 114 |
gh_patches_debug_14269 | rasdani/github-patches | git_diff | learningequality__kolibri-2093 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
importchannel command is broken
## Summary
* `importchannel` command is broken
## System information
- Version: 0.6
- Operating system: Linux
```
kolibri manage importchannel -- network bcd99d8aeef04ce6b9e25a88d87eedb7
INFO Downloading data for channel id bcd99d8aeef04ce6b9e25a88d87eedb7
0%| | 0/239616 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/home/christian/.virtualenvs/kolibri/bin/kolibri", line 9, in <module>
load_entry_point('kolibri', 'console_scripts', 'kolibri')()
File "/home/christian/repos/kolibri/kolibri/utils/cli.py", line 580, in main
manage(command, args=django_args)
File "/home/christian/repos/kolibri/kolibri/utils/cli.py", line 411, in manage
execute_from_command_line(argv=argv)
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 353, in execute_from_command_line
utility.execute()
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 345, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py", line 348, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py", line 399, in execute
output = self.handle(*args, **options)
File "/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py", line 98, in handle
return self.handle_async(*args, **options)
File "/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py", line 89, in handle_async
self.download_channel(options["channel_id"])
File "/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py", line 42, in download_channel
self._transfer(DOWNLOAD_METHOD, channel_id)
File "/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py", line 73, in _transfer
if self.is_cancelled():
File "/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py", line 108, in is_cancelled
self.check_for_cancel(last_stage)
TypeError: 'NoneType' object is not callable
```
## How to reproduce
1. Use the `importchannel` command
## Real-life consequences
* Breaks my workflow.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/tasks/management/commands/base.py`
Content:
```
1 import abc
2 from collections import namedtuple
3
4 from barbequeue.exceptions import UserCancelledError
5 from django.core.management.base import BaseCommand
6 from tqdm import tqdm
7
8 Progress = namedtuple(
9 'Progress',
10 [
11 'progress_fraction',
12 'message',
13 'extra_data',
14 'level',
15 ]
16 )
17
18
19 class ProgressTracker():
20
21 def __init__(self, total=100, level=0, update_callback=None):
22
23 # set default values
24 self.progress = 0
25 self.message = ""
26 self.extra_data = None
27
28 # store provided arguments
29 self.total = total
30 self.level = level
31 self.update_callback = update_callback
32
33 # initialize the tqdm progress bar
34 self.progressbar = tqdm(total=total)
35
36 def update_progress(self, increment=1, message="", extra_data=None):
37
38 self.progressbar.update(increment)
39
40 self.progress += increment
41
42 self.message = message
43
44 self.extra_data = extra_data
45
46 if callable(self.update_callback):
47 p = self.get_progress()
48 self.update_callback(p.progress_fraction, p)
49
50 def get_progress(self):
51
52 return Progress(
53 progress_fraction=0 if self.total == 0 else self.progress / float(self.total),
54 message=self.message,
55 extra_data=self.extra_data,
56 level=self.level,
57 )
58
59 def __enter__(self):
60 return self.update_progress
61
62 def __exit__(self, *exc_details):
63 if self.progressbar is not None:
64 self.progressbar.close()
65
66
67 class AsyncCommand(BaseCommand):
68 """A management command with added convenience functions for displaying
69 progress to the user.
70
71 Rather than implementing handle() (as is for BaseCommand), subclasses, must
72 implement handle_async(), which accepts the same arguments as handle().
73
74 If ran from the command line, AsynCommand displays a progress bar to the
75 user. If ran asynchronously through kolibri.tasks.schedule_command(),
76 AsyncCommand sends results through the Progress class to the main Django
77 process. Anyone who knows the task id for the command instance can check
78 the intermediate progress by looking at the task's AsyncResult.result
79 variable.
80
81 """
82
83 def __init__(self, *args, **kwargs):
84 self.progresstrackers = []
85
86 def _update_all_progress(self, progress_fraction, progress):
87 if callable(self.update_progress):
88 progress_list = [p.get_progress() for p in self.progresstrackers]
89 # HACK (aron): self.update_progress' signature has changed between django_q
90 # and iceqube/bbq. It now expects the current progress,
91 # the total progress, and then derives the
92 # percentage progress manually.
93 self.update_progress(progress_list[0].progress_fraction, 1.)
94
95 def handle(self, *args, **options):
96 self.update_progress = options.pop("update_progress", None)
97 self.check_for_cancel = options.pop("check_for_cancel", None)
98 return self.handle_async(*args, **options)
99
100 def start_progress(self, total=100):
101 level = len(self.progresstrackers)
102 tracker = ProgressTracker(total=total, level=level, update_callback=self._update_all_progress)
103 self.progresstrackers.append(tracker)
104 return tracker
105
106 def is_cancelled(self, last_stage="CANCELLING"):
107 try:
108 self.check_for_cancel(last_stage)
109 return False
110 except UserCancelledError:
111 return True
112
113 def cancel(self, last_stage="CANCELLED"):
114 self.check_for_cancel(last_stage)
115
116 @abc.abstractmethod
117 def handle_async(self, *args, **options):
118 """
119 handle_async should be reimplemented by any Subclass of AsyncCommand.
120 """
121 pass
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/tasks/management/commands/base.py b/kolibri/tasks/management/commands/base.py
--- a/kolibri/tasks/management/commands/base.py
+++ b/kolibri/tasks/management/commands/base.py
@@ -104,14 +104,17 @@
return tracker
def is_cancelled(self, last_stage="CANCELLING"):
- try:
- self.check_for_cancel(last_stage)
- return False
- except UserCancelledError:
- return True
+ if self.check_for_cancel:
+ try:
+ self.check_for_cancel(last_stage)
+ return False
+ except UserCancelledError:
+ return True
+ return False
def cancel(self, last_stage="CANCELLED"):
- self.check_for_cancel(last_stage)
+ if self.check_for_cancel:
+ return self.check_for_cancel(last_stage)
@abc.abstractmethod
def handle_async(self, *args, **options):
|
{"golden_diff": "diff --git a/kolibri/tasks/management/commands/base.py b/kolibri/tasks/management/commands/base.py\n--- a/kolibri/tasks/management/commands/base.py\n+++ b/kolibri/tasks/management/commands/base.py\n@@ -104,14 +104,17 @@\n return tracker\n \n def is_cancelled(self, last_stage=\"CANCELLING\"):\n- try:\n- self.check_for_cancel(last_stage)\n- return False\n- except UserCancelledError:\n- return True\n+ if self.check_for_cancel:\n+ try:\n+ self.check_for_cancel(last_stage)\n+ return False\n+ except UserCancelledError:\n+ return True\n+ return False\n \n def cancel(self, last_stage=\"CANCELLED\"):\n- self.check_for_cancel(last_stage)\n+ if self.check_for_cancel:\n+ return self.check_for_cancel(last_stage)\n \n @abc.abstractmethod\n def handle_async(self, *args, **options):\n", "issue": "importchannel command is broken\n## Summary\r\n\r\n* `importchannel` command is broken\r\n\r\n## System information\r\n\r\n - Version: 0.6\r\n - Operating system: Linux\r\n\r\n```\r\nkolibri manage importchannel -- network bcd99d8aeef04ce6b9e25a88d87eedb7\r\nINFO Downloading data for channel id bcd99d8aeef04ce6b9e25a88d87eedb7\r\n 0%| | 0/239616 [00:00<?, ?it/s]\r\nTraceback (most recent call last):\r\n File \"/home/christian/.virtualenvs/kolibri/bin/kolibri\", line 9, in <module>\r\n load_entry_point('kolibri', 'console_scripts', 'kolibri')()\r\n File \"/home/christian/repos/kolibri/kolibri/utils/cli.py\", line 580, in main\r\n manage(command, args=django_args)\r\n File \"/home/christian/repos/kolibri/kolibri/utils/cli.py\", line 411, in manage\r\n execute_from_command_line(argv=argv)\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 353, in execute_from_command_line\r\n utility.execute()\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 345, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py\", line 348, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py\", line 399, in execute\r\n output = self.handle(*args, **options)\r\n File \"/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py\", line 98, in handle\r\n return self.handle_async(*args, **options)\r\n File \"/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py\", line 89, in handle_async\r\n self.download_channel(options[\"channel_id\"])\r\n File \"/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py\", line 42, in download_channel\r\n self._transfer(DOWNLOAD_METHOD, channel_id)\r\n File \"/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py\", line 73, in _transfer\r\n if self.is_cancelled():\r\n File \"/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py\", line 108, in is_cancelled\r\n self.check_for_cancel(last_stage)\r\nTypeError: 'NoneType' object is not callable\r\n\r\n```\r\n\r\n## How to reproduce\r\n\r\n1. 
Use the `importchannel` command\r\n\r\n## Real-life consequences\r\n\r\n* Breaks my workflow.\n", "before_files": [{"content": "import abc\nfrom collections import namedtuple\n\nfrom barbequeue.exceptions import UserCancelledError\nfrom django.core.management.base import BaseCommand\nfrom tqdm import tqdm\n\nProgress = namedtuple(\n 'Progress',\n [\n 'progress_fraction',\n 'message',\n 'extra_data',\n 'level',\n ]\n)\n\n\nclass ProgressTracker():\n\n def __init__(self, total=100, level=0, update_callback=None):\n\n # set default values\n self.progress = 0\n self.message = \"\"\n self.extra_data = None\n\n # store provided arguments\n self.total = total\n self.level = level\n self.update_callback = update_callback\n\n # initialize the tqdm progress bar\n self.progressbar = tqdm(total=total)\n\n def update_progress(self, increment=1, message=\"\", extra_data=None):\n\n self.progressbar.update(increment)\n\n self.progress += increment\n\n self.message = message\n\n self.extra_data = extra_data\n\n if callable(self.update_callback):\n p = self.get_progress()\n self.update_callback(p.progress_fraction, p)\n\n def get_progress(self):\n\n return Progress(\n progress_fraction=0 if self.total == 0 else self.progress / float(self.total),\n message=self.message,\n extra_data=self.extra_data,\n level=self.level,\n )\n\n def __enter__(self):\n return self.update_progress\n\n def __exit__(self, *exc_details):\n if self.progressbar is not None:\n self.progressbar.close()\n\n\nclass AsyncCommand(BaseCommand):\n \"\"\"A management command with added convenience functions for displaying\n progress to the user.\n\n Rather than implementing handle() (as is for BaseCommand), subclasses, must\n implement handle_async(), which accepts the same arguments as handle().\n\n If ran from the command line, AsynCommand displays a progress bar to the\n user. If ran asynchronously through kolibri.tasks.schedule_command(),\n AsyncCommand sends results through the Progress class to the main Django\n process. Anyone who knows the task id for the command instance can check\n the intermediate progress by looking at the task's AsyncResult.result\n variable.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.progresstrackers = []\n\n def _update_all_progress(self, progress_fraction, progress):\n if callable(self.update_progress):\n progress_list = [p.get_progress() for p in self.progresstrackers]\n # HACK (aron): self.update_progress' signature has changed between django_q\n # and iceqube/bbq. 
It now expects the current progress,\n # the total progress, and then derives the\n # percentage progress manually.\n self.update_progress(progress_list[0].progress_fraction, 1.)\n\n def handle(self, *args, **options):\n self.update_progress = options.pop(\"update_progress\", None)\n self.check_for_cancel = options.pop(\"check_for_cancel\", None)\n return self.handle_async(*args, **options)\n\n def start_progress(self, total=100):\n level = len(self.progresstrackers)\n tracker = ProgressTracker(total=total, level=level, update_callback=self._update_all_progress)\n self.progresstrackers.append(tracker)\n return tracker\n\n def is_cancelled(self, last_stage=\"CANCELLING\"):\n try:\n self.check_for_cancel(last_stage)\n return False\n except UserCancelledError:\n return True\n\n def cancel(self, last_stage=\"CANCELLED\"):\n self.check_for_cancel(last_stage)\n\n @abc.abstractmethod\n def handle_async(self, *args, **options):\n \"\"\"\n handle_async should be reimplemented by any Subclass of AsyncCommand.\n \"\"\"\n pass\n", "path": "kolibri/tasks/management/commands/base.py"}], "after_files": [{"content": "import abc\nfrom collections import namedtuple\n\nfrom barbequeue.exceptions import UserCancelledError\nfrom django.core.management.base import BaseCommand\nfrom tqdm import tqdm\n\nProgress = namedtuple(\n 'Progress',\n [\n 'progress_fraction',\n 'message',\n 'extra_data',\n 'level',\n ]\n)\n\n\nclass ProgressTracker():\n\n def __init__(self, total=100, level=0, update_callback=None):\n\n # set default values\n self.progress = 0\n self.message = \"\"\n self.extra_data = None\n\n # store provided arguments\n self.total = total\n self.level = level\n self.update_callback = update_callback\n\n # initialize the tqdm progress bar\n self.progressbar = tqdm(total=total)\n\n def update_progress(self, increment=1, message=\"\", extra_data=None):\n\n self.progressbar.update(increment)\n\n self.progress += increment\n\n self.message = message\n\n self.extra_data = extra_data\n\n if callable(self.update_callback):\n p = self.get_progress()\n self.update_callback(p.progress_fraction, p)\n\n def get_progress(self):\n\n return Progress(\n progress_fraction=0 if self.total == 0 else self.progress / float(self.total),\n message=self.message,\n extra_data=self.extra_data,\n level=self.level,\n )\n\n def __enter__(self):\n return self.update_progress\n\n def __exit__(self, *exc_details):\n if self.progressbar is not None:\n self.progressbar.close()\n\n\nclass AsyncCommand(BaseCommand):\n \"\"\"A management command with added convenience functions for displaying\n progress to the user.\n\n Rather than implementing handle() (as is for BaseCommand), subclasses, must\n implement handle_async(), which accepts the same arguments as handle().\n\n If ran from the command line, AsynCommand displays a progress bar to the\n user. If ran asynchronously through kolibri.tasks.schedule_command(),\n AsyncCommand sends results through the Progress class to the main Django\n process. Anyone who knows the task id for the command instance can check\n the intermediate progress by looking at the task's AsyncResult.result\n variable.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.progresstrackers = []\n\n def _update_all_progress(self, progress_fraction, progress):\n if callable(self.update_progress):\n progress_list = [p.get_progress() for p in self.progresstrackers]\n # HACK (aron): self.update_progress' signature has changed between django_q\n # and iceqube/bbq. 
It now expects the current progress,\n # the total progress, and then derives the\n # percentage progress manually.\n self.update_progress(progress_list[0].progress_fraction, 1.)\n\n def handle(self, *args, **options):\n self.update_progress = options.pop(\"update_progress\", None)\n self.check_for_cancel = options.pop(\"check_for_cancel\", None)\n return self.handle_async(*args, **options)\n\n def start_progress(self, total=100):\n level = len(self.progresstrackers)\n tracker = ProgressTracker(total=total, level=level, update_callback=self._update_all_progress)\n self.progresstrackers.append(tracker)\n return tracker\n\n def is_cancelled(self, last_stage=\"CANCELLING\"):\n if self.check_for_cancel:\n try:\n self.check_for_cancel(last_stage)\n return False\n except UserCancelledError:\n return True\n return False\n\n def cancel(self, last_stage=\"CANCELLED\"):\n if self.check_for_cancel:\n return self.check_for_cancel(last_stage)\n\n @abc.abstractmethod\n def handle_async(self, *args, **options):\n \"\"\"\n handle_async should be reimplemented by any Subclass of AsyncCommand.\n \"\"\"\n pass\n", "path": "kolibri/tasks/management/commands/base.py"}]}
| 2,034 | 217 |
gh_patches_debug_32958 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-498 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tornado Tracer configuration doesn't have access to settings object for Trace Filtering
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/tornado/__init__.py`
Content:
```
1 """
2 The Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.
3 Auto instrumentation is available using the ``patch`` function that **must be called before**
4 importing the tornado library. The following is an example::
5
6 # patch before importing tornado and concurrent.futures
7 from ddtrace import tracer, patch
8 patch(tornado=True)
9
10 import tornado.web
11 import tornado.gen
12 import tornado.ioloop
13
14 # create your handlers
15 class MainHandler(tornado.web.RequestHandler):
16 @tornado.gen.coroutine
17 def get(self):
18 self.write("Hello, world")
19
20 # create your application
21 app = tornado.web.Application([
22 (r'/', MainHandler),
23 ])
24
25 # and run it as usual
26 app.listen(8888)
27 tornado.ioloop.IOLoop.current().start()
28
29 When any type of ``RequestHandler`` is hit, a request root span is automatically created. If
30 you want to trace more parts of your application, you can use the ``wrap()`` decorator and
31 the ``trace()`` method as usual::
32
33 class MainHandler(tornado.web.RequestHandler):
34 @tornado.gen.coroutine
35 def get(self):
36 yield self.notify()
37 yield self.blocking_method()
38 with tracer.trace('tornado.before_write') as span:
39 # trace more work in the handler
40
41 @tracer.wrap('tornado.executor_handler')
42 @tornado.concurrent.run_on_executor
43 def blocking_method(self):
44 # do something expensive
45
46 @tracer.wrap('tornado.notify', service='tornado-notification')
47 @tornado.gen.coroutine
48 def notify(self):
49 # do something
50
51 Tornado settings can be used to change some tracing configuration, like::
52
53 settings = {
54 'datadog_trace': {
55 'default_service': 'my-tornado-app',
56 'tags': {'env': 'production'},
57 'distributed_tracing': True,
58 },
59 }
60
61 app = tornado.web.Application([
62 (r'/', MainHandler),
63 ], **settings)
64
65 The available settings are:
66
67 * ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually
68 this configuration must be updated with a meaningful name.
69 * ``tags`` (default: `{}`): set global tags that should be applied to all spans.
70 * ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the
71 code is still instrumented but no spans are sent to the APM agent.
72 * ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called
73 remotely from an instrumented application.
74 We suggest to enable it only for internal services where headers are under your control.
75 * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.
76 * ``agent_port`` (default: `8126`): define the port of the APM agent.
77 """
78 from ...utils.importlib import require_modules
79
80
81 required_modules = ['tornado']
82
83 with require_modules(required_modules) as missing_modules:
84 if not missing_modules:
85 from .stack_context import run_with_trace_context, TracerStackContext
86
87 context_provider = TracerStackContext()
88
89 from .patch import patch, unpatch
90
91 __all__ = [
92 'patch',
93 'unpatch',
94 'context_provider',
95 'run_with_trace_context',
96 'TracerStackContext',
97 ]
98
```
Path: `ddtrace/contrib/tornado/application.py`
Content:
```
1 import ddtrace
2
3 from tornado import template
4
5 from . import decorators, context_provider
6 from .constants import CONFIG_KEY
7
8 from ...ext import AppTypes
9
10
11 def tracer_config(__init__, app, args, kwargs):
12 """
13 Wrap Tornado web application so that we can configure services info and
14 tracing settings after the initialization.
15 """
16 # call the Application constructor
17 __init__(*args, **kwargs)
18
19 # default settings
20 settings = {
21 'tracer': ddtrace.tracer,
22 'default_service': 'tornado-web',
23 'distributed_tracing': False,
24 }
25
26 # update defaults with users settings
27 user_settings = app.settings.get(CONFIG_KEY)
28 if user_settings:
29 settings.update(user_settings)
30
31 app.settings[CONFIG_KEY] = settings
32 tracer = settings['tracer']
33 service = settings['default_service']
34
35 # the tracer must use the right Context propagation and wrap executor;
36 # this action is done twice because the patch() method uses the
37 # global tracer while here we can have a different instance (even if
38 # this is not usual).
39 tracer.configure(
40 context_provider=context_provider,
41 wrap_executor=decorators.wrap_executor,
42 enabled=settings.get('enabled', None),
43 hostname=settings.get('agent_hostname', None),
44 port=settings.get('agent_port', None),
45 )
46
47 # set global tags if any
48 tags = settings.get('tags', None)
49 if tags:
50 tracer.set_tags(tags)
51
52 # configure the current service
53 tracer.set_service_info(
54 service=service,
55 app='tornado',
56 app_type=AppTypes.web,
57 )
58
59 # configure the PIN object for template rendering
60 ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py
--- a/ddtrace/contrib/tornado/__init__.py
+++ b/ddtrace/contrib/tornado/__init__.py
@@ -55,6 +55,11 @@
'default_service': 'my-tornado-app',
'tags': {'env': 'production'},
'distributed_tracing': True,
+ 'settings': {
+ 'FILTERS': [
+ FilterRequestsOnUrl(r'http://test\.example\.com'),
+ ],
+ },
},
}
@@ -74,6 +79,7 @@
We suggest to enable it only for internal services where headers are under your control.
* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.
* ``agent_port`` (default: `8126`): define the port of the APM agent.
+* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.
"""
from ...utils.importlib import require_modules
diff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py
--- a/ddtrace/contrib/tornado/application.py
+++ b/ddtrace/contrib/tornado/application.py
@@ -32,6 +32,9 @@
tracer = settings['tracer']
service = settings['default_service']
+ # extract extra settings
+ extra_settings = settings.get('settings', {})
+
# the tracer must use the right Context propagation and wrap executor;
# this action is done twice because the patch() method uses the
# global tracer while here we can have a different instance (even if
@@ -42,6 +45,7 @@
enabled=settings.get('enabled', None),
hostname=settings.get('agent_hostname', None),
port=settings.get('agent_port', None),
+ settings=extra_settings,
)
# set global tags if any
|
{"golden_diff": "diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py\n--- a/ddtrace/contrib/tornado/__init__.py\n+++ b/ddtrace/contrib/tornado/__init__.py\n@@ -55,6 +55,11 @@\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n+ 'settings': {\n+ 'FILTERS': [\n+ FilterRequestsOnUrl(r'http://test\\.example\\.com'),\n+ ],\n+ },\n },\n }\n \n@@ -74,6 +79,7 @@\n We suggest to enable it only for internal services where headers are under your control.\n * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n * ``agent_port`` (default: `8126`): define the port of the APM agent.\n+* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.\n \"\"\"\n from ...utils.importlib import require_modules\n \ndiff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py\n--- a/ddtrace/contrib/tornado/application.py\n+++ b/ddtrace/contrib/tornado/application.py\n@@ -32,6 +32,9 @@\n tracer = settings['tracer']\n service = settings['default_service']\n \n+ # extract extra settings\n+ extra_settings = settings.get('settings', {})\n+\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n@@ -42,6 +45,7 @@\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n+ settings=extra_settings,\n )\n \n # set global tags if any\n", "issue": "Tornado Tracer configuration doesn't have access to settings object for Trace Filtering\n\n", "before_files": [{"content": "\"\"\"\nThe Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.\nAuto instrumentation is available using the ``patch`` function that **must be called before**\nimporting the tornado library. The following is an example::\n\n # patch before importing tornado and concurrent.futures\n from ddtrace import tracer, patch\n patch(tornado=True)\n\n import tornado.web\n import tornado.gen\n import tornado.ioloop\n\n # create your handlers\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n self.write(\"Hello, world\")\n\n # create your application\n app = tornado.web.Application([\n (r'/', MainHandler),\n ])\n\n # and run it as usual\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\nWhen any type of ``RequestHandler`` is hit, a request root span is automatically created. 
If\nyou want to trace more parts of your application, you can use the ``wrap()`` decorator and\nthe ``trace()`` method as usual::\n\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n yield self.notify()\n yield self.blocking_method()\n with tracer.trace('tornado.before_write') as span:\n # trace more work in the handler\n\n @tracer.wrap('tornado.executor_handler')\n @tornado.concurrent.run_on_executor\n def blocking_method(self):\n # do something expensive\n\n @tracer.wrap('tornado.notify', service='tornado-notification')\n @tornado.gen.coroutine\n def notify(self):\n # do something\n\nTornado settings can be used to change some tracing configuration, like::\n\n settings = {\n 'datadog_trace': {\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n },\n }\n\n app = tornado.web.Application([\n (r'/', MainHandler),\n ], **settings)\n\nThe available settings are:\n\n* ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually\n this configuration must be updated with a meaningful name.\n* ``tags`` (default: `{}`): set global tags that should be applied to all spans.\n* ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the\n code is still instrumented but no spans are sent to the APM agent.\n* ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called\n remotely from an instrumented application.\n We suggest to enable it only for internal services where headers are under your control.\n* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n* ``agent_port`` (default: `8126`): define the port of the APM agent.\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = ['tornado']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .stack_context import run_with_trace_context, TracerStackContext\n\n context_provider = TracerStackContext()\n\n from .patch import patch, unpatch\n\n __all__ = [\n 'patch',\n 'unpatch',\n 'context_provider',\n 'run_with_trace_context',\n 'TracerStackContext',\n ]\n", "path": "ddtrace/contrib/tornado/__init__.py"}, {"content": "import ddtrace\n\nfrom tornado import template\n\nfrom . 
import decorators, context_provider\nfrom .constants import CONFIG_KEY\n\nfrom ...ext import AppTypes\n\n\ndef tracer_config(__init__, app, args, kwargs):\n \"\"\"\n Wrap Tornado web application so that we can configure services info and\n tracing settings after the initialization.\n \"\"\"\n # call the Application constructor\n __init__(*args, **kwargs)\n\n # default settings\n settings = {\n 'tracer': ddtrace.tracer,\n 'default_service': 'tornado-web',\n 'distributed_tracing': False,\n }\n\n # update defaults with users settings\n user_settings = app.settings.get(CONFIG_KEY)\n if user_settings:\n settings.update(user_settings)\n\n app.settings[CONFIG_KEY] = settings\n tracer = settings['tracer']\n service = settings['default_service']\n\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n # this is not usual).\n tracer.configure(\n context_provider=context_provider,\n wrap_executor=decorators.wrap_executor,\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n )\n\n # set global tags if any\n tags = settings.get('tags', None)\n if tags:\n tracer.set_tags(tags)\n\n # configure the current service\n tracer.set_service_info(\n service=service,\n app='tornado',\n app_type=AppTypes.web,\n )\n\n # configure the PIN object for template rendering\n ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template)\n", "path": "ddtrace/contrib/tornado/application.py"}], "after_files": [{"content": "\"\"\"\nThe Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.\nAuto instrumentation is available using the ``patch`` function that **must be called before**\nimporting the tornado library. The following is an example::\n\n # patch before importing tornado and concurrent.futures\n from ddtrace import tracer, patch\n patch(tornado=True)\n\n import tornado.web\n import tornado.gen\n import tornado.ioloop\n\n # create your handlers\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n self.write(\"Hello, world\")\n\n # create your application\n app = tornado.web.Application([\n (r'/', MainHandler),\n ])\n\n # and run it as usual\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\nWhen any type of ``RequestHandler`` is hit, a request root span is automatically created. 
If\nyou want to trace more parts of your application, you can use the ``wrap()`` decorator and\nthe ``trace()`` method as usual::\n\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n yield self.notify()\n yield self.blocking_method()\n with tracer.trace('tornado.before_write') as span:\n # trace more work in the handler\n\n @tracer.wrap('tornado.executor_handler')\n @tornado.concurrent.run_on_executor\n def blocking_method(self):\n # do something expensive\n\n @tracer.wrap('tornado.notify', service='tornado-notification')\n @tornado.gen.coroutine\n def notify(self):\n # do something\n\nTornado settings can be used to change some tracing configuration, like::\n\n settings = {\n 'datadog_trace': {\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n 'settings': {\n 'FILTERS': [\n FilterRequestsOnUrl(r'http://test\\.example\\.com'),\n ],\n },\n },\n }\n\n app = tornado.web.Application([\n (r'/', MainHandler),\n ], **settings)\n\nThe available settings are:\n\n* ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually\n this configuration must be updated with a meaningful name.\n* ``tags`` (default: `{}`): set global tags that should be applied to all spans.\n* ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the\n code is still instrumented but no spans are sent to the APM agent.\n* ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called\n remotely from an instrumented application.\n We suggest to enable it only for internal services where headers are under your control.\n* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n* ``agent_port`` (default: `8126`): define the port of the APM agent.\n* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = ['tornado']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .stack_context import run_with_trace_context, TracerStackContext\n\n context_provider = TracerStackContext()\n\n from .patch import patch, unpatch\n\n __all__ = [\n 'patch',\n 'unpatch',\n 'context_provider',\n 'run_with_trace_context',\n 'TracerStackContext',\n ]\n", "path": "ddtrace/contrib/tornado/__init__.py"}, {"content": "import ddtrace\n\nfrom tornado import template\n\nfrom . 
import decorators, context_provider\nfrom .constants import CONFIG_KEY\n\nfrom ...ext import AppTypes\n\n\ndef tracer_config(__init__, app, args, kwargs):\n \"\"\"\n Wrap Tornado web application so that we can configure services info and\n tracing settings after the initialization.\n \"\"\"\n # call the Application constructor\n __init__(*args, **kwargs)\n\n # default settings\n settings = {\n 'tracer': ddtrace.tracer,\n 'default_service': 'tornado-web',\n 'distributed_tracing': False,\n }\n\n # update defaults with users settings\n user_settings = app.settings.get(CONFIG_KEY)\n if user_settings:\n settings.update(user_settings)\n\n app.settings[CONFIG_KEY] = settings\n tracer = settings['tracer']\n service = settings['default_service']\n\n # extract extra settings\n extra_settings = settings.get('settings', {})\n\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n # this is not usual).\n tracer.configure(\n context_provider=context_provider,\n wrap_executor=decorators.wrap_executor,\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n settings=extra_settings,\n )\n\n # set global tags if any\n tags = settings.get('tags', None)\n if tags:\n tracer.set_tags(tags)\n\n # configure the current service\n tracer.set_service_info(\n service=service,\n app='tornado',\n app_type=AppTypes.web,\n )\n\n # configure the PIN object for template rendering\n ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template)\n", "path": "ddtrace/contrib/tornado/application.py"}]}
| 1,738 | 441 |
gh_patches_debug_26961 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1888 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Basingstoke and Deane is broken since the 8th of Feb
### I Have A Problem With:
A specific source
### What's Your Problem
The service no longer downloads the waste updates. I tried 1.46 and the master.
### Source (if relevant)
_No response_
### Logs
```Shell
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
integration: waste_collection_schedule (documentation)
First occurred: 11:26:41 (1 occurrences)
Last logged: 11:26:41
fetch failed for source Basingstoke and Deane Borough Council: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py", line 65, in fetch date=datetime.strptime(date_str, "%A, %d %B %Y").date(), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.12/_strptime.py", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.12/_strptime.py", line 333, in _strptime raise ValueError("time data %r does not match format %r" % ValueError: time data 'none / unknown' does not match format '%A, %d %B %Y'
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: basingstoke_gov_uk
args:
uprn: "1000809XXXX"
customize:
- type: Garden
show: True
- type: Waste
show: True
- type: Recycling
show: True
- type: Glass
show: True
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py`
Content:
```
1 from datetime import datetime
2
3 import requests
4 import urllib3
5 from bs4 import BeautifulSoup
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 # With verify=True the POST fails due to a SSLCertVerificationError.
9 # Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
10 # https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
11 # https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
12 # These two lines areused to suppress the InsecureRequestWarning when using verify=False
13 urllib3.disable_warnings()
14
15 TITLE = "Basingstoke and Deane Borough Council"
16 DESCRIPTION = "Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK."
17 URL = "https://basingstoke.gov.uk"
18 TEST_CASES = {
19 "Test_001": {"uprn": "100060234732"},
20 "Test_002": {"uprn": "100060218986"},
21 "Test_003": {"uprn": 100060235836},
22 "Test_004": {"uprn": 100060224194},
23 }
24 HEADERS = {
25 "user-agent": "Mozilla/5.0",
26 }
27 ICON_MAP = {
28 "WASTE": "mdi:trash-can",
29 "RECYCLING": "mdi:recycle",
30 "GARDEN": "mdi:leaf",
31 "GLASS": "mdi:glass-fragile",
32 }
33
34
35 class Source:
36 def __init__(self, uprn):
37 self._uprn = str(uprn)
38
39 def fetch(self):
40 REQUEST_COOKIES = {
41 "cookie_control_popup": "N",
42 "WhenAreMyBinsCollected": self._uprn,
43 }
44 r = requests.get(
45 "https://www.basingstoke.gov.uk/bincollections",
46 headers=HEADERS,
47 cookies=REQUEST_COOKIES,
48 verify=False,
49 )
50 r.raise_for_status()
51
52 soup = BeautifulSoup(r.text, "html.parser")
53
54 services = soup.findAll("div", {"class": "service"})
55
56 entries = []
57
58 for service in services:
59 waste_type = service.find("h2").text.split(" ")[0]
60 schedule_dates = service.findAll("li")
61 for schedule in schedule_dates:
62 date_str = schedule.text.split("(")[0].strip()
63 entries.append(
64 Collection(
65 date=datetime.strptime(date_str, "%A, %d %B %Y").date(),
66 t=waste_type,
67 icon=ICON_MAP.get(waste_type.upper()),
68 )
69 )
70
71 return entries
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
@@ -1,3 +1,4 @@
+import logging
from datetime import datetime
import requests
@@ -30,6 +31,7 @@
"GARDEN": "mdi:leaf",
"GLASS": "mdi:glass-fragile",
}
+LOGGER = logging.getLogger(__name__)
class Source:
@@ -60,9 +62,17 @@
schedule_dates = service.findAll("li")
for schedule in schedule_dates:
date_str = schedule.text.split("(")[0].strip()
+ try:
+ date = datetime.strptime(date_str, "%A, %d %B %Y").date()
+ except ValueError as e:
+ LOGGER.warning(
+ f"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}"
+ )
+ continue
+
entries.append(
Collection(
- date=datetime.strptime(date_str, "%A, %d %B %Y").date(),
+ date=date,
t=waste_type,
icon=ICON_MAP.get(waste_type.upper()),
)
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n@@ -1,3 +1,4 @@\n+import logging\n from datetime import datetime\n \n import requests\n@@ -30,6 +31,7 @@\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n }\n+LOGGER = logging.getLogger(__name__)\n \n \n class Source:\n@@ -60,9 +62,17 @@\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n+ try:\n+ date = datetime.strptime(date_str, \"%A, %d %B %Y\").date()\n+ except ValueError as e:\n+ LOGGER.warning(\n+ f\"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}\"\n+ )\n+ continue\n+\n entries.append(\n Collection(\n- date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(),\n+ date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n", "issue": "[Bug]: Basingstoke and Deane is broken since the 8th of Feb\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe service no longer downloads the waste updates. I tried 1.46 and the master.\n\n### Source (if relevant)\n\n_No response_\n\n### Logs\n\n```Shell\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nintegration: waste_collection_schedule (documentation)\r\nFirst occurred: 11:26:41 (1 occurrences)\r\nLast logged: 11:26:41\r\n\r\nfetch failed for source Basingstoke and Deane Borough Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\", line 65, in fetch date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 333, in _strptime raise ValueError(\"time data %r does not match format %r\" % ValueError: time data 'none / unknown' does not match format '%A, %d %B %Y'\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: basingstoke_gov_uk\r\n args:\r\n uprn: \"1000809XXXX\"\r\n customize:\r\n - type: Garden\r\n show: True\r\n - type: Waste\r\n show: True\r\n - type: Recycling\r\n show: True\r\n - type: Glass\r\n show: True\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- 
[X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nimport urllib3\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"Basingstoke and Deane Borough Council\"\nDESCRIPTION = \"Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK.\"\nURL = \"https://basingstoke.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100060234732\"},\n \"Test_002\": {\"uprn\": \"100060218986\"},\n \"Test_003\": {\"uprn\": 100060235836},\n \"Test_004\": {\"uprn\": 100060224194},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"WASTE\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn)\n\n def fetch(self):\n REQUEST_COOKIES = {\n \"cookie_control_popup\": \"N\",\n \"WhenAreMyBinsCollected\": self._uprn,\n }\n r = requests.get(\n \"https://www.basingstoke.gov.uk/bincollections\",\n headers=HEADERS,\n cookies=REQUEST_COOKIES,\n verify=False,\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n services = soup.findAll(\"div\", {\"class\": \"service\"})\n\n entries = []\n\n for service in services:\n waste_type = service.find(\"h2\").text.split(\" \")[0]\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n entries.append(\n Collection(\n date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(),\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime\n\nimport requests\nimport urllib3\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. 
The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"Basingstoke and Deane Borough Council\"\nDESCRIPTION = \"Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK.\"\nURL = \"https://basingstoke.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100060234732\"},\n \"Test_002\": {\"uprn\": \"100060218986\"},\n \"Test_003\": {\"uprn\": 100060235836},\n \"Test_004\": {\"uprn\": 100060224194},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"WASTE\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n}\nLOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn)\n\n def fetch(self):\n REQUEST_COOKIES = {\n \"cookie_control_popup\": \"N\",\n \"WhenAreMyBinsCollected\": self._uprn,\n }\n r = requests.get(\n \"https://www.basingstoke.gov.uk/bincollections\",\n headers=HEADERS,\n cookies=REQUEST_COOKIES,\n verify=False,\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n services = soup.findAll(\"div\", {\"class\": \"service\"})\n\n entries = []\n\n for service in services:\n waste_type = service.find(\"h2\").text.split(\" \")[0]\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n try:\n date = datetime.strptime(date_str, \"%A, %d %B %Y\").date()\n except ValueError as e:\n LOGGER.warning(\n f\"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}\"\n )\n continue\n\n entries.append(\n Collection(\n date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py"}]}
| 1,712 | 324 |
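The failure above boils down to council-supplied placeholder text ('none / unknown') being fed straight into `datetime.strptime`. A minimal, self-contained sketch of the skip-and-log pattern that the golden diff applies; the helper name and sample strings are illustrative rather than part of the integration:

```python
import logging
from datetime import date, datetime
from typing import Optional

LOGGER = logging.getLogger(__name__)


def parse_collection_date(date_str: str) -> Optional[date]:
    """Return the parsed date, or None when the text is not a real date."""
    try:
        return datetime.strptime(date_str, "%A, %d %B %Y").date()
    except ValueError as exc:
        # Placeholder strings such as 'none / unknown' are logged and skipped
        # instead of aborting the whole fetch.
        LOGGER.warning("Skipping unparseable date %r: %s", date_str, exc)
        return None


dates = ["Friday, 16 February 2024", "none / unknown"]
parsed = [d for s in dates if (d := parse_collection_date(s)) is not None]
```

The same guard keeps the remaining waste types usable even when one service row carries no scheduled date.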
gh_patches_debug_14671
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-1129
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ClasswiseWrapper yields different results
## 🐛 Bug
Using `JaccardIndex` with `ClasswiseWrapper` results in different values than without `ClasswiseWrapper`.
### To Reproduce
Steps to reproduce the behavior...
Run the code snippet.
#### Code sample
```python
from torchmetrics import ClasswiseWrapper, JaccardIndex
import torch
target = torch.randint(0, 2, (10, 25, 25))
preds = [ torch.randint(0, 2, (10, 25, 25)) for i in range (3)]
jaccard_single = JaccardIndex(num_classes=2, average=None)
class_wrapper = ClasswiseWrapper(
JaccardIndex(num_classes=2, average=None),
labels=["class1", "class2"]
)
for p in preds:
print("Metric ",jaccard_single(p,target))
print("Wraped metric ",class_wrapper(p,target))
```
The code produces the following output:
```
Metric tensor([0.3351, 0.3333])
Wraped metric {'jaccardindex_class1': tensor(0.3351), 'jaccardindex_class2': tensor(0.3333)}
Metric tensor([0.3293, 0.3357])
Wraped metric {'jaccardindex_class1': tensor(0.3322), 'jaccardindex_class2': tensor(0.3345)}
Metric tensor([0.3424, 0.3435])
Wraped metric {'jaccardindex_class1': tensor(0.3356), 'jaccardindex_class2': tensor(0.3375)}
```
### Expected behavior
I would expect that the wrapped metric outputs the same values as the simple `JaccardIndex`.
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):
Installed version 0.9.2 using pip
- Python & PyTorch Version (e.g., 1.0):
Tested with Python 3.8.14 and pytorch 1.3.1
- Any other relevant information such as OS (e.g., Linux):
### Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/wrappers/classwise.py`
Content:
```
1 from typing import Any, Dict, List, Optional
2
3 from torch import Tensor
4
5 from torchmetrics import Metric
6
7
8 class ClasswiseWrapper(Metric):
9 """Wrapper class for altering the output of classification metrics that returns multiple values to include
10 label information.
11
12 Args:
13 metric: base metric that should be wrapped. It is assumed that the metric outputs a single
14 tensor that is split along the first dimension.
15 labels: list of strings indicating the different classes.
16
17 Example:
18 >>> import torch
19 >>> _ = torch.manual_seed(42)
20 >>> from torchmetrics import Accuracy, ClasswiseWrapper
21 >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))
22 >>> preds = torch.randn(10, 3).softmax(dim=-1)
23 >>> target = torch.randint(3, (10,))
24 >>> metric(preds, target)
25 {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}
26
27 Example (labels as list of strings):
28 >>> import torch
29 >>> from torchmetrics import Accuracy, ClasswiseWrapper
30 >>> metric = ClasswiseWrapper(
31 ... Accuracy(num_classes=3, average=None),
32 ... labels=["horse", "fish", "dog"]
33 ... )
34 >>> preds = torch.randn(10, 3).softmax(dim=-1)
35 >>> target = torch.randint(3, (10,))
36 >>> metric(preds, target)
37 {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}
38
39 Example (in metric collection):
40 >>> import torch
41 >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall
42 >>> labels = ["horse", "fish", "dog"]
43 >>> metric = MetricCollection(
44 ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),
45 ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}
46 ... )
47 >>> preds = torch.randn(10, 3).softmax(dim=-1)
48 >>> target = torch.randint(3, (10,))
49 >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
50 {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),
51 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}
52 """
53
54 def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:
55 super().__init__()
56 if not isinstance(metric, Metric):
57 raise ValueError(f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}")
58 if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):
59 raise ValueError(f"Expected argument `labels` to either be `None` or a list of strings but got {labels}")
60 self.metric = metric
61 self.labels = labels
62
63 def _convert(self, x: Tensor) -> Dict[str, Any]:
64 name = self.metric.__class__.__name__.lower()
65 if self.labels is None:
66 return {f"{name}_{i}": val for i, val in enumerate(x)}
67 return {f"{name}_{lab}": val for lab, val in zip(self.labels, x)}
68
69 def update(self, *args: Any, **kwargs: Any) -> None:
70 self.metric.update(*args, **kwargs)
71
72 def compute(self) -> Dict[str, Tensor]:
73 return self._convert(self.metric.compute())
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py
--- a/src/torchmetrics/wrappers/classwise.py
+++ b/src/torchmetrics/wrappers/classwise.py
@@ -51,6 +51,8 @@
'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}
"""
+ full_state_update: Optional[bool] = True
+
def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:
super().__init__()
if not isinstance(metric, Metric):
@@ -71,3 +73,6 @@
def compute(self) -> Dict[str, Tensor]:
return self._convert(self.metric.compute())
+
+ def reset(self) -> None:
+ self.metric.reset()
|
{"golden_diff": "diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py\n--- a/src/torchmetrics/wrappers/classwise.py\n+++ b/src/torchmetrics/wrappers/classwise.py\n@@ -51,6 +51,8 @@\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n \n+ full_state_update: Optional[bool] = True\n+\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n@@ -71,3 +73,6 @@\n \n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n+\n+ def reset(self) -> None:\n+ self.metric.reset()\n", "issue": "ClasswiseWrapper yields different results \n## \ud83d\udc1b Bug\r\n\r\nUsing `JaccardIndex` with `ClasswiseWrapper` results in different values than without `ClasswiseWrapper`. \r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\nRun the code snippet. \r\n\r\n#### Code sample\r\n\r\n```python\r\nfrom torchmetrics import ClasswiseWrapper, JaccardIndex\r\nimport torch \r\n\r\ntarget = torch.randint(0, 2, (10, 25, 25))\r\npreds = [ torch.randint(0, 2, (10, 25, 25)) for i in range (3)]\r\njaccard_single = JaccardIndex(num_classes=2, average=None)\r\nclass_wrapper = ClasswiseWrapper(\r\n JaccardIndex(num_classes=2, average=None),\r\n labels=[\"class1\", \"class2\"]\r\n )\r\n\r\nfor p in preds:\r\n print(\"Metric \",jaccard_single(p,target)) \r\n print(\"Wraped metric \",class_wrapper(p,target))\r\n```\r\n\r\nThe code produces the following output: \r\n\r\n```\r\nMetric tensor([0.3351, 0.3333])\r\nWraped metric {'jaccardindex_class1': tensor(0.3351), 'jaccardindex_class2': tensor(0.3333)}\r\nMetric tensor([0.3293, 0.3357])\r\nWraped metric {'jaccardindex_class1': tensor(0.3322), 'jaccardindex_class2': tensor(0.3345)}\r\nMetric tensor([0.3424, 0.3435])\r\nWraped metric {'jaccardindex_class1': tensor(0.3356), 'jaccardindex_class2': tensor(0.3375)}\r\n```\r\n\r\n### Expected behavior\r\n\r\nI would expect that the wrapped metric outputs the same values as the simple `JaccardIndex`. \r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): \r\n Installed version 0.9.2 using pip \r\n- Python & PyTorch Version (e.g., 1.0):\r\n Tested with Python 3.8.14 and pytorch 1.3.1\r\n- Any other relevant information such as OS (e.g., Linux):\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional\n\nfrom torch import Tensor\n\nfrom torchmetrics import Metric\n\n\nclass ClasswiseWrapper(Metric):\n \"\"\"Wrapper class for altering the output of classification metrics that returns multiple values to include\n label information.\n\n Args:\n metric: base metric that should be wrapped. 
It is assumed that the metric outputs a single\n tensor that is split along the first dimension.\n labels: list of strings indicating the different classes.\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}\n\n Example (labels as list of strings):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(\n ... Accuracy(num_classes=3, average=None),\n ... labels=[\"horse\", \"fish\", \"dog\"]\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}\n\n Example (in metric collection):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall\n >>> labels = [\"horse\", \"fish\", \"dog\"]\n >>> metric = MetricCollection(\n ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),\n ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n raise ValueError(f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}\")\n if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):\n raise ValueError(f\"Expected argument `labels` to either be `None` or a list of strings but got {labels}\")\n self.metric = metric\n self.labels = labels\n\n def _convert(self, x: Tensor) -> Dict[str, Any]:\n name = self.metric.__class__.__name__.lower()\n if self.labels is None:\n return {f\"{name}_{i}\": val for i, val in enumerate(x)}\n return {f\"{name}_{lab}\": val for lab, val in zip(self.labels, x)}\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n self.metric.update(*args, **kwargs)\n\n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n", "path": "src/torchmetrics/wrappers/classwise.py"}], "after_files": [{"content": "from typing import Any, Dict, List, Optional\n\nfrom torch import Tensor\n\nfrom torchmetrics import Metric\n\n\nclass ClasswiseWrapper(Metric):\n \"\"\"Wrapper class for altering the output of classification metrics that returns multiple values to include\n label information.\n\n Args:\n metric: base metric that should be wrapped. 
It is assumed that the metric outputs a single\n tensor that is split along the first dimension.\n labels: list of strings indicating the different classes.\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}\n\n Example (labels as list of strings):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(\n ... Accuracy(num_classes=3, average=None),\n ... labels=[\"horse\", \"fish\", \"dog\"]\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}\n\n Example (in metric collection):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall\n >>> labels = [\"horse\", \"fish\", \"dog\"]\n >>> metric = MetricCollection(\n ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),\n ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n\n full_state_update: Optional[bool] = True\n\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n raise ValueError(f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}\")\n if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):\n raise ValueError(f\"Expected argument `labels` to either be `None` or a list of strings but got {labels}\")\n self.metric = metric\n self.labels = labels\n\n def _convert(self, x: Tensor) -> Dict[str, Any]:\n name = self.metric.__class__.__name__.lower()\n if self.labels is None:\n return {f\"{name}_{i}\": val for i, val in enumerate(x)}\n return {f\"{name}_{lab}\": val for lab, val in zip(self.labels, x)}\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n self.metric.update(*args, **kwargs)\n\n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n\n def reset(self) -> None:\n self.metric.reset()\n", "path": "src/torchmetrics/wrappers/classwise.py"}]}
| 1,782 | 204 |
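The drifting values in the record above are a symptom of leftover state: the wrapper forwards `update()` and `compute()` to the inner metric but, before the fix, never forwarded `reset()`, so each call kept blending in batches already seen (the patch also marks the wrapper with `full_state_update`). A framework-agnostic sketch of the full delegation pattern; the class below is a stand-in, not the torchmetrics implementation:

```python
class ClasswiseWrapperSketch:
    """Wraps a stateful per-class metric and forwards its whole lifecycle."""

    def __init__(self, metric, labels=None):
        self.metric = metric
        self.labels = labels

    def update(self, *args, **kwargs):
        self.metric.update(*args, **kwargs)

    def compute(self):
        values = self.metric.compute()
        names = self.labels or [str(i) for i in range(len(values))]
        return {f"metric_{name}": value for name, value in zip(names, values)}

    def reset(self):
        # Without this delegation the inner metric's counters survive across
        # calls, which is exactly the slow drift shown in the issue output.
        self.metric.reset()
```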
gh_patches_debug_32666
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1776
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Infinite self-responses in Mattermost connector
After fixing the Mattermost connector with PR #1774 it turns out it suffers from the same infinite self-response problem (#1691) as was fixed for the Gitter connector in #1692.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/mattermost/__init__.py`
Content:
```
1 """A connector for Mattermost."""
2 import logging
3 import json
4
5 from mattermostdriver import Driver, Websocket
6 from voluptuous import Required
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message
10
11 _LOGGER = logging.getLogger(__name__)
12 CONFIG_SCHEMA = {
13 Required("token"): str,
14 Required("url"): str,
15 Required("team-name"): str,
16 "scheme": str,
17 "port": int,
18 "ssl-verify": bool,
19 "connect-timeout": int,
20 }
21
22
23 class ConnectorMattermost(Connector):
24 """A connector for Mattermost."""
25
26 def __init__(self, config, opsdroid=None):
27 """Create the connector."""
28 super().__init__(config, opsdroid=opsdroid)
29 _LOGGER.debug(_("Starting Mattermost connector"))
30 self.name = "mattermost"
31 self.token = config["token"]
32 self.url = config["url"]
33 self.team_name = config["team-name"]
34 self.scheme = config.get("scheme", "https")
35 self.port = config.get("port", 8065)
36 self.verify = config.get("ssl-verify", True)
37 self.timeout = config.get("connect-timeout", 30)
38 self.request_timeout = None
39 self.mfa_token = None
40 self.debug = False
41 self.listening = True
42
43 self.mm_driver = Driver(
44 {
45 "url": self.url,
46 "token": self.token,
47 "scheme": self.scheme,
48 "port": self.port,
49 "verify": self.verify,
50 "timeout": self.timeout,
51 "request_timeout": self.request_timeout,
52 "mfa_token": self.mfa_token,
53 "debug": self.debug,
54 }
55 )
56
57 async def connect(self):
58 """Connect to the chat service."""
59 _LOGGER.info(_("Connecting to Mattermost"))
60
61 login_response = self.mm_driver.login()
62
63 _LOGGER.info(login_response)
64
65 if "id" in login_response:
66 self.bot_id = login_response["id"]
67 if "username" in login_response:
68 self.bot_name = login_response["username"]
69
70 _LOGGER.info(_("Connected as %s"), self.bot_name)
71
72 self.mm_driver.websocket = Websocket(
73 self.mm_driver.options, self.mm_driver.client.token
74 )
75
76 _LOGGER.info(_("Connected successfully"))
77
78 async def disconnect(self):
79 """Disconnect from Mattermost."""
80 self.listening = False
81 self.mm_driver.logout()
82
83 async def listen(self):
84 """Listen for and parse new messages."""
85 await self.mm_driver.websocket.connect(self.process_message)
86
87 async def process_message(self, raw_message):
88 """Process a raw message and pass it to the parser."""
89 _LOGGER.info(raw_message)
90
91 message = json.loads(raw_message)
92
93 if "event" in message and message["event"] == "posted":
94 data = message["data"]
95 post = json.loads(data["post"])
96 await self.opsdroid.parse(
97 Message(
98 text=post["message"],
99 user=data["sender_name"],
100 target=data["channel_name"],
101 connector=self,
102 raw_event=message,
103 )
104 )
105
106 @register_event(Message)
107 async def send_message(self, message):
108 """Respond with a message."""
109 _LOGGER.debug(
110 _("Responding with: '%s' in room %s"), message.text, message.target
111 )
112 channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(
113 self.team_name, message.target
114 )["id"]
115 self.mm_driver.posts.create_post(
116 options={"channel_id": channel_id, "message": message.text}
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py
--- a/opsdroid/connector/mattermost/__init__.py
+++ b/opsdroid/connector/mattermost/__init__.py
@@ -39,6 +39,7 @@
self.mfa_token = None
self.debug = False
self.listening = True
+ self.bot_id = None
self.mm_driver = Driver(
{
@@ -66,8 +67,7 @@
self.bot_id = login_response["id"]
if "username" in login_response:
self.bot_name = login_response["username"]
-
- _LOGGER.info(_("Connected as %s"), self.bot_name)
+ _LOGGER.info(_("Connected as %s"), self.bot_name)
self.mm_driver.websocket = Websocket(
self.mm_driver.options, self.mm_driver.client.token
@@ -93,15 +93,18 @@
if "event" in message and message["event"] == "posted":
data = message["data"]
post = json.loads(data["post"])
- await self.opsdroid.parse(
- Message(
- text=post["message"],
- user=data["sender_name"],
- target=data["channel_name"],
- connector=self,
- raw_event=message,
+ # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775)
+ # (but also parse if somehow our bot_id is unknown, like in the unit tests)
+ if self.bot_id is None or self.bot_id != post["user_id"]:
+ await self.opsdroid.parse(
+ Message(
+ text=post["message"],
+ user=data["sender_name"],
+ target=data["channel_name"],
+ connector=self,
+ raw_event=message,
+ )
)
- )
@register_event(Message)
async def send_message(self, message):
|
{"golden_diff": "diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py\n--- a/opsdroid/connector/mattermost/__init__.py\n+++ b/opsdroid/connector/mattermost/__init__.py\n@@ -39,6 +39,7 @@\n self.mfa_token = None\n self.debug = False\n self.listening = True\n+ self.bot_id = None\n \n self.mm_driver = Driver(\n {\n@@ -66,8 +67,7 @@\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n-\n- _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n+ _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n \n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n@@ -93,15 +93,18 @@\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n- await self.opsdroid.parse(\n- Message(\n- text=post[\"message\"],\n- user=data[\"sender_name\"],\n- target=data[\"channel_name\"],\n- connector=self,\n- raw_event=message,\n+ # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775)\n+ # (but also parse if somehow our bot_id is unknown, like in the unit tests)\n+ if self.bot_id is None or self.bot_id != post[\"user_id\"]:\n+ await self.opsdroid.parse(\n+ Message(\n+ text=post[\"message\"],\n+ user=data[\"sender_name\"],\n+ target=data[\"channel_name\"],\n+ connector=self,\n+ raw_event=message,\n+ )\n )\n- )\n \n @register_event(Message)\n async def send_message(self, message):\n", "issue": "Infinite self-responses in Mattermost connector\nAfter fixing the Mattermost connector with PR #1774 it turns out it suffers from the same infinite self-response problem (#1691) as was fixed for the Gitter connector in #1692.\n", "before_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" 
in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n text=post[\"message\"],\n user=data[\"sender_name\"],\n target=data[\"channel_name\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = False\n self.listening = True\n self.bot_id = None\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n 
_LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775)\n # (but also parse if somehow our bot_id is unknown, like in the unit tests)\n if self.bot_id is None or self.bot_id != post[\"user_id\"]:\n await self.opsdroid.parse(\n Message(\n text=post[\"message\"],\n user=data[\"sender_name\"],\n target=data[\"channel_name\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}]}
| 1,353 | 440 |
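The fix mirrors the earlier Gitter change: remember the bot's own user id at login and drop any inbound post it authored, otherwise each reply re-enters the parser and triggers another reply. A connector-agnostic sketch of that guard; the function name is illustrative, while the field names follow the Mattermost 'posted' payload shown in the record:

```python
import json
from typing import Optional


def extract_parseable_post(event: dict, bot_id: Optional[str]) -> Optional[dict]:
    """Return the post worth parsing, or None for non-posts and our own posts."""
    if event.get("event") != "posted":
        return None
    post = json.loads(event["data"]["post"])
    # Skipping our own posts breaks the feedback loop; when bot_id is unknown
    # (as in the unit tests) everything is still parsed.
    if bot_id is not None and post.get("user_id") == bot_id:
        return None
    return post
```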
gh_patches_debug_26810
|
rasdani/github-patches
|
git_diff
|
facebookresearch__fairscale-1108
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lots of Commandline Output from this line.
https://github.com/facebookresearch/fairscale/blob/2350968ee61a6f9ca6ecd24aba9db536e814a24c/fairscale/internal/version.py#L27
this warning appears a LOT in the command-line output when training. Can we remove it, or move it somewhere it is shown only once at the start?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairscale/internal/version.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import logging
7 import re
8 from typing import List, Tuple
9
10 import torch
11
12 __all__: List[str] = ["torch_version"]
13
14
15 def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:
16 numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version)
17 if not numbering:
18 return tuple()
19 # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,
20 if numbering.group(4):
21 # Two options here:
22 # - either skip this version (minor number check is not relevant)
23 # - or check that our codebase is not broken by this ongoing development.
24
25 # Assuming that we're interested in the second use-case more than the first,
26 # return the pre-release or dev numbering
27 logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it")
28
29 return tuple(int(numbering.group(n)) for n in range(1, 4))
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/fairscale/internal/version.py b/fairscale/internal/version.py
--- a/fairscale/internal/version.py
+++ b/fairscale/internal/version.py
@@ -11,13 +11,15 @@
__all__: List[str] = ["torch_version"]
+_logged = False
def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:
numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version)
if not numbering:
return tuple()
# Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,
- if numbering.group(4):
+ global _logged
+ if numbering.group(4) and not _logged:
# Two options here:
# - either skip this version (minor number check is not relevant)
# - or check that our codebase is not broken by this ongoing development.
@@ -25,5 +27,6 @@
# Assuming that we're interested in the second use-case more than the first,
# return the pre-release or dev numbering
logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it")
+ _logged = True
return tuple(int(numbering.group(n)) for n in range(1, 4))
|
{"golden_diff": "diff --git a/fairscale/internal/version.py b/fairscale/internal/version.py\n--- a/fairscale/internal/version.py\n+++ b/fairscale/internal/version.py\n@@ -11,13 +11,15 @@\n \n __all__: List[str] = [\"torch_version\"]\n \n+_logged = False\n \n def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n- if numbering.group(4):\n+ global _logged\n+ if numbering.group(4) and not _logged:\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n@@ -25,5 +27,6 @@\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n+ _logged = True\n \n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "issue": "Lots of Commandline Output from this line.\n\r\nhttps://github.com/facebookresearch/fairscale/blob/2350968ee61a6f9ca6ecd24aba9db536e814a24c/fairscale/internal/version.py#L27\r\n\r\nthis warning appears a LOT in the commandline output when training. Can we remove it or place it in a place where it gets only shown once at the start?\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport re\nfrom typing import List, Tuple\n\nimport torch\n\n__all__: List[str] = [\"torch_version\"]\n\n\ndef torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n if numbering.group(4):\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n\n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "path": "fairscale/internal/version.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport re\nfrom typing import List, Tuple\n\nimport torch\n\n__all__: List[str] = [\"torch_version\"]\n\n_logged = False\n\ndef torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n global _logged\n if numbering.group(4) and not _logged:\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n _logged = True\n\n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "path": "fairscale/internal/version.py"}]}
| 682 | 300 |
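The patch quiets the repetition with a module-level flag flipped after the first warning. An alternative warn-once idiom, sketched below rather than taken from fairscale, caches on the message text itself, which also deduplicates when several distinct pre-release strings occur in one process:

```python
import functools
import logging


@functools.lru_cache(maxsize=None)
def warn_once(message: str) -> None:
    # lru_cache keys on the message, so each distinct warning reaches the
    # logger exactly once per process however often the caller runs.
    logging.warning(message)


for _ in range(1000):
    warn_once("Pytorch pre-release version 1.8.0a0 - assuming intent to test it")
```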
gh_patches_debug_13256
|
rasdani/github-patches
|
git_diff
|
openvinotoolkit__datumaro-278
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test errors after tensorflow installation on Ubuntu 20.04
Ubuntu 20.04, Python 3.8.5
Installation of tensorflow (to enable skipped tests) results in test errors.
**Steps to reproduce:**
```
git clone https://github.com/openvinotoolkit/datumaro
cd datumaro
python3 -m pip install virtualenv
python3 -m virtualenv venv
. venv/bin/activate
pip install datumaro
python3 -m unittest -v
//there are some skipped tests - required tensorflow and pandas)
pip install tensorflow
//during installation numpy 1.20.3 was uninstalled and 1.19.5 was installed
python3 -m unittest -v
```
**Expected result:**
No test errors after installing the libraries required to perform the initially skipped tests.
**Current result:**
```
Ran 390 tests in 11.807s
FAILED (errors=29, skipped=7)
```
```======================================================================
ERROR: test_validate_annotations_segmentation (tests.test_validator.TestValidateAnnotations)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/sstrehlk/src/datum_p/tests/test_validator.py", line 803, in test_validate_annotations_segmentation
actual_results = validate_annotations(self.dataset, 'segmentation',
File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1255, in validate_annotations
stats = validator.compute_statistics(dataset)
File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1064, in compute_statistics
_update_mask_stats_by_label(
File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1026, in _update_mask_stats_by_label
area = ann.get_area()
File "/home/sstrehlk/src/datum_p/datumaro/components/extractor.py", line 374, in get_area
import pycocotools.mask as mask_utils
File "/home/sstrehlk/src/datum_p/venv/lib/python3.8/site-packages/pycocotools/mask.py", line 3, in <module>
import pycocotools._mask as _mask
File "pycocotools/_mask.pyx", line 1, in init pycocotools._mask
ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject
----------------------------------------------------------------------
```
It seems that there is an incompatibility between numpy 1.19.5 and pycocotools 2.0.2. There is a workaround for it:
```
pip install pycocotools==2.0.0
```
--- END ISSUE ---
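Pinning `pycocotools==2.0.0` works as a local workaround, but the root cause is an ABI mismatch: the 2.0.2 Linux wheels are built against numpy 1.20, while installing TensorFlow downgrades numpy to 1.19. A packaging-side sketch of the exclusion the project settles on (the full change appears in the patch further below); only the marker line is load-bearing, the rest is illustrative:

```python
# Fragment of a setup.py-style requirements list using PEP 508 environment markers.
requirements = [
    "numpy>=1.17.3",
    # Avoid the 2.0.2 Linux wheels (numpy 1.20 ABI) so pip rebuilds
    # pycocotools from source against the numpy that is actually installed.
    'pycocotools!=2.0.2; platform_system != "Windows"',
    'pycocotools-windows; platform_system == "Windows"',
]
```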
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1
2 # Copyright (C) 2019-2020 Intel Corporation
3 #
4 # SPDX-License-Identifier: MIT
5
6 from distutils.util import strtobool
7 import os
8 import os.path as osp
9 import re
10 import setuptools
11
12 # Snyk scan integration
13 here = None
14
15
16 def find_version(project_dir=None):
17 if not project_dir:
18 project_dir = osp.dirname(osp.abspath(__file__))
19
20 file_path = osp.join(project_dir, 'datumaro', 'version.py')
21
22 with open(file_path, 'r') as version_file:
23 version_text = version_file.read()
24
25 # PEP440:
26 # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
27 pep_regex = r'([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?'
28 version_regex = r'VERSION\s*=\s*.(' + pep_regex + ').'
29 match = re.match(version_regex, version_text)
30 if not match:
31 raise RuntimeError("Failed to find version string in '%s'" % file_path)
32
33 version = version_text[match.start(1) : match.end(1)]
34 return version
35
36 def get_requirements():
37 requirements = [
38 'attrs>=19.3.0',
39 'defusedxml',
40 'GitPython',
41 'lxml',
42 'matplotlib',
43 'numpy>=1.17.3',
44 'Pillow',
45 'pycocotools; platform_system != "Windows"',
46 'pycocotools-windows; platform_system == "Windows"',
47 'PyYAML',
48 'scikit-image',
49 'tensorboardX',
50 ]
51 if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()):
52 requirements.append('opencv-python-headless')
53 else:
54 requirements.append('opencv-python')
55
56 return requirements
57
58 with open('README.md', 'r') as fh:
59 long_description = fh.read()
60
61 setuptools.dist.Distribution().fetch_build_eggs([
62 'Cython>=0.27.3' # required for pycocotools and others, if need to compile
63 ])
64
65 setuptools.setup(
66 name="datumaro",
67 version=find_version(here),
68 author="Intel",
69 author_email="[email protected]",
70 description="Dataset Management Framework (Datumaro)",
71 long_description=long_description,
72 long_description_content_type="text/markdown",
73 url="https://github.com/openvinotoolkit/datumaro",
74 packages=setuptools.find_packages(exclude=['tests*']),
75 classifiers=[
76 "Programming Language :: Python :: 3",
77 "License :: OSI Approved :: MIT License",
78 "Operating System :: OS Independent",
79 ],
80 python_requires='>=3.6',
81 install_requires=get_requirements(),
82 extras_require={
83 'tf': ['tensorflow'],
84 'tf-gpu': ['tensorflow-gpu'],
85 },
86 entry_points={
87 'console_scripts': [
88 'datum=datumaro.cli.__main__:main',
89 ],
90 },
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,8 +42,17 @@
'matplotlib',
'numpy>=1.17.3',
'Pillow',
- 'pycocotools; platform_system != "Windows"',
+
+ # Avoid 2.0.2 Linux binary distribution because of
+ # a conflict in numpy versions with TensorFlow:
+ # - TF is compiled with numpy 1.19 ABI
+ # - pycocotools is compiled with numpy 1.20 ABI
+ # Using a previous version allows to force package rebuilding.
+ #
+ # https://github.com/openvinotoolkit/datumaro/issues/253
+ 'pycocotools!=2.0.2; platform_system != "Windows"',
'pycocotools-windows; platform_system == "Windows"',
+
'PyYAML',
'scikit-image',
'tensorboardX',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,8 +42,17 @@\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n- 'pycocotools; platform_system != \"Windows\"',\n+\n+ # Avoid 2.0.2 Linux binary distribution because of\n+ # a conflict in numpy versions with TensorFlow:\n+ # - TF is compiled with numpy 1.19 ABI\n+ # - pycocotools is compiled with numpy 1.20 ABI\n+ # Using a previous version allows to force package rebuilding.\n+ #\n+ # https://github.com/openvinotoolkit/datumaro/issues/253\n+ 'pycocotools!=2.0.2; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n+\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n", "issue": "Test errors after tensorflow installation on Ubuntu 20.04\nUbuntu20.04, Python 3.8.5 \r\nInstallation of tensorflow (to enable skipped tests) results with tests errors.\r\n\r\n**Steps to reproduce:**\r\n```\r\ngit clone https://github.com/openvinotoolkit/datumaro \r\ncd datumaro\r\npython3 -m pip install virtualenv\r\npython3 -m virtualenv venv\r\n. venv/bin/activate\r\npip install datumaro\r\npython3 -m unittest -v\r\n//there are some skipped tests - required tensorflow and pandas)\r\npip install tensorflow\r\n//during installation numpy 1.20.3 was uninstalled and 1.19.5 was installed\r\npython3 -m unittest -v\r\n```\r\n**Expected result:**\r\nNo test errors after installation libraries required to perform initially skipped tests.\r\n\r\n**Current result:**\r\n```\r\nRan 390 tests in 11.807s\r\n\r\nFAILED (errors=29, skipped=7)\r\n```\r\n```======================================================================\r\nERROR: test_validate_annotations_segmentation (tests.test_validator.TestValidateAnnotations)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/sstrehlk/src/datum_p/tests/test_validator.py\", line 803, in test_validate_annotations_segmentation\r\n actual_results = validate_annotations(self.dataset, 'segmentation',\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1255, in validate_annotations\r\n stats = validator.compute_statistics(dataset)\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1064, in compute_statistics\r\n _update_mask_stats_by_label(\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1026, in _update_mask_stats_by_label\r\n area = ann.get_area()\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/extractor.py\", line 374, in get_area\r\n import pycocotools.mask as mask_utils\r\n File \"/home/sstrehlk/src/datum_p/venv/lib/python3.8/site-packages/pycocotools/mask.py\", line 3, in <module>\r\n import pycocotools._mask as _mask\r\n File \"pycocotools/_mask.pyx\", line 1, in init pycocotools._mask\r\nValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject\r\n\r\n----------------------------------------------------------------------\r\n```\r\nIt seems that there is incompatibility between numpy 1.19.5 and pycocotools 2.0.2. 
There is workaround for it: \r\n```\r\npip install pycocotools==2.0.0\r\n``` \n", "before_files": [{"content": "\n# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom distutils.util import strtobool\nimport os\nimport os.path as osp\nimport re\nimport setuptools\n\n# Snyk scan integration\nhere = None\n\n\ndef find_version(project_dir=None):\n if not project_dir:\n project_dir = osp.dirname(osp.abspath(__file__))\n\n file_path = osp.join(project_dir, 'datumaro', 'version.py')\n\n with open(file_path, 'r') as version_file:\n version_text = version_file.read()\n\n # PEP440:\n # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions\n pep_regex = r'([1-9]\\d*!)?(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*((a|b|rc)(0|[1-9]\\d*))?(\\.post(0|[1-9]\\d*))?(\\.dev(0|[1-9]\\d*))?'\n version_regex = r'VERSION\\s*=\\s*.(' + pep_regex + ').'\n match = re.match(version_regex, version_text)\n if not match:\n raise RuntimeError(\"Failed to find version string in '%s'\" % file_path)\n\n version = version_text[match.start(1) : match.end(1)]\n return version\n\ndef get_requirements():\n requirements = [\n 'attrs>=19.3.0',\n 'defusedxml',\n 'GitPython',\n 'lxml',\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n 'pycocotools; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n ]\n if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()):\n requirements.append('opencv-python-headless')\n else:\n requirements.append('opencv-python')\n\n return requirements\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.dist.Distribution().fetch_build_eggs([\n 'Cython>=0.27.3' # required for pycocotools and others, if need to compile\n])\n\nsetuptools.setup(\n name=\"datumaro\",\n version=find_version(here),\n author=\"Intel\",\n author_email=\"[email protected]\",\n description=\"Dataset Management Framework (Datumaro)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/openvinotoolkit/datumaro\",\n packages=setuptools.find_packages(exclude=['tests*']),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=get_requirements(),\n extras_require={\n 'tf': ['tensorflow'],\n 'tf-gpu': ['tensorflow-gpu'],\n },\n entry_points={\n 'console_scripts': [\n 'datum=datumaro.cli.__main__:main',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "\n# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom distutils.util import strtobool\nimport os\nimport os.path as osp\nimport re\nimport setuptools\n\n# Snyk scan integration\nhere = None\n\n\ndef find_version(project_dir=None):\n if not project_dir:\n project_dir = osp.dirname(osp.abspath(__file__))\n\n file_path = osp.join(project_dir, 'datumaro', 'version.py')\n\n with open(file_path, 'r') as version_file:\n version_text = version_file.read()\n\n # PEP440:\n # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions\n pep_regex = r'([1-9]\\d*!)?(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*((a|b|rc)(0|[1-9]\\d*))?(\\.post(0|[1-9]\\d*))?(\\.dev(0|[1-9]\\d*))?'\n version_regex = r'VERSION\\s*=\\s*.(' + pep_regex + ').'\n match = re.match(version_regex, version_text)\n if not match:\n raise RuntimeError(\"Failed to find 
version string in '%s'\" % file_path)\n\n version = version_text[match.start(1) : match.end(1)]\n return version\n\ndef get_requirements():\n requirements = [\n 'attrs>=19.3.0',\n 'defusedxml',\n 'GitPython',\n 'lxml',\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n\n # Avoid 2.0.2 Linux binary distribution because of\n # a conflict in numpy versions with TensorFlow:\n # - TF is compiled with numpy 1.19 ABI\n # - pycocotools is compiled with numpy 1.20 ABI\n # Using a previous version allows to force package rebuilding.\n #\n # https://github.com/openvinotoolkit/datumaro/issues/253\n 'pycocotools!=2.0.2; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n ]\n if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()):\n requirements.append('opencv-python-headless')\n else:\n requirements.append('opencv-python')\n\n return requirements\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.dist.Distribution().fetch_build_eggs([\n 'Cython>=0.27.3' # required for pycocotools and others, if need to compile\n])\n\nsetuptools.setup(\n name=\"datumaro\",\n version=find_version(here),\n author=\"Intel\",\n author_email=\"[email protected]\",\n description=\"Dataset Management Framework (Datumaro)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/openvinotoolkit/datumaro\",\n packages=setuptools.find_packages(exclude=['tests*']),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=get_requirements(),\n extras_require={\n 'tf': ['tensorflow'],\n 'tf-gpu': ['tensorflow-gpu'],\n },\n entry_points={\n 'console_scripts': [\n 'datum=datumaro.cli.__main__:main',\n ],\n },\n)\n", "path": "setup.py"}]}
| 1,770 | 221 |
gh_patches_debug_28710
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1639
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Waste type not showing for City of Doncaster (Green Bin Collection)
### I Have A Problem With:
A specific source, The integration in general
### What's Your Problem
For Doncaster Council the Green bin collection only runs 9 months out of 12. The next collection is in early March and can be viewed via the Council bin look-up calendar, but the waste type is not returned by the integration; it only seems to scrape 3 weeks in advance. Can this be changed?
### Source (if relevant)
doncaster_gov_uk
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: doncaster_gov_uk
args:
uprn: "xxxxxxx"
- platform: waste_collection_schedule
name: Bins
details_format: appointment_types
leadtime: 90
# value_template: VALUE_TEMPLATE
# date_template: DATE_TEMPLATE
add_days_to: true
# event_index: EVENT_INDEX
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [ ] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py`
Content:
```
1 import re
2 import requests
3 import json
4 from datetime import datetime, timedelta
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "City of Doncaster Council"
8 DESCRIPTION = "Source for doncaster.gov.uk services for the City of Doncaster Council, UK."
9 URL = "https://doncaster.gov.uk"
10
11 TEST_CASES = {
12 "Test_001": {"uprn": "100050701118"},
13 "Test_002": {"uprn": "100050753396"},
14 "Test_003": {"uprn": 100050699118},
15 }
16
17 ICON_MAP = {
18 "GREEN": "mdi:leaf",
19 "RECYCLING": "mdi:recycle",
20 "BLACK": "mdi:trash-can",
21 "BULKY": "mdi:fridge",
22 "RE-USE": "mdi:sofa",
23 }
24
25 REGEX_DATE = r"\(([0-9]{10})"
26
27
28 class Source:
29 def __init__(self, uprn):
30 self._uprn = str(uprn).zfill(12)
31
32
33 def fetch(self):
34
35 # Query needs start and end epoch dates
36 today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
37 start = (today - timedelta(weeks=3)).strftime("%s")
38 end = (today + timedelta(weeks=3)).strftime("%s")
39 url = f"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}"
40 # start = start.strftime("%s")
41 # end = end.strftime("%s")
42
43 s = requests.Session()
44 r = s.get(url)
45 data = json.loads(r.text)
46
47 entries = []
48
49 for entry in data["slots"]:
50 waste_type = entry["title"]
51 waste_date = entry["end"]
52 epoch = re.findall(REGEX_DATE, waste_date)
53 waste_date = datetime.fromtimestamp(int(epoch[0])).date()
54 entries.append(
55 Collection(
56 date=waste_date,
57 t=waste_type,
58 icon=ICON_MAP.get(waste_type.upper()),
59 )
60 )
61
62 return entries
63
```
--- END FILES ---
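A brief aside on the source file above: each slot's `end` value is reduced to a date by pulling the first ten digits (a Unix timestamp in seconds) out of the string with `REGEX_DATE`. A small sketch of that conversion follows; the sample string is hypothetical, since the row does not show a real API response.

```python
# Sketch of the timestamp extraction used in doncaster_gov_uk.py.
# The sample "end" value is made up; only the regex and the conversion
# steps come from the source file above.
import re
from datetime import datetime

REGEX_DATE = r"\(([0-9]{10})"
sample_end = "/Date(1709510400000)/"  # hypothetical ASP.NET-style value

epoch = re.findall(REGEX_DATE, sample_end)
waste_date = datetime.fromtimestamp(int(epoch[0])).date()
print(waste_date)
```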
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py
@@ -1,11 +1,14 @@
-import re
-import requests
import json
+import re
from datetime import datetime, timedelta
+
+import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "City of Doncaster Council"
-DESCRIPTION = "Source for doncaster.gov.uk services for the City of Doncaster Council, UK."
+DESCRIPTION = (
+ "Source for doncaster.gov.uk services for the City of Doncaster Council, UK."
+)
URL = "https://doncaster.gov.uk"
TEST_CASES = {
@@ -29,13 +32,11 @@
def __init__(self, uprn):
self._uprn = str(uprn).zfill(12)
-
def fetch(self):
-
- # Query needs start and end epoch dates
+ # Query needs start and end epoch dates
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
- start = (today - timedelta(weeks=3)).strftime("%s")
- end = (today + timedelta(weeks=3)).strftime("%s")
+ start = (today - timedelta(days=365)).strftime("%s")
+ end = (today + timedelta(days=365)).strftime("%s")
url = f"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}"
# start = start.strftime("%s")
# end = end.strftime("%s")
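The fix above simply widens the requested window from three weeks to a year either side of today, so collections several months out (such as the March green-bin date from the issue) fall inside it. A minimal sketch of the URL the patched `fetch` builds — standard library only, with a UPRN taken from the file's `TEST_CASES`; note that `strftime("%s")` mirrors the source and is platform-dependent.

```python
# Sketch of the widened query window introduced by the patch above.
from datetime import datetime, timedelta

uprn = "100050701118"  # example value from TEST_CASES
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start = (today - timedelta(days=365)).strftime("%s")  # previously weeks=3
end = (today + timedelta(days=365)).strftime("%s")    # previously weeks=3
url = (
    "https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar"
    f"?UPRN={uprn}&Start={start}&End={end}"
)
print(url)
```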
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n@@ -1,11 +1,14 @@\n-import re\n-import requests\n import json\n+import re\n from datetime import datetime, timedelta\n+\n+import requests\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"City of Doncaster Council\"\n-DESCRIPTION = \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\n+DESCRIPTION = (\n+ \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\n+)\n URL = \"https://doncaster.gov.uk\"\n \n TEST_CASES = {\n@@ -29,13 +32,11 @@\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n \n-\n def fetch(self):\n-\n- # Query needs start and end epoch dates \n+ # Query needs start and end epoch dates\n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n- start = (today - timedelta(weeks=3)).strftime(\"%s\")\n- end = (today + timedelta(weeks=3)).strftime(\"%s\")\n+ start = (today - timedelta(days=365)).strftime(\"%s\")\n+ end = (today + timedelta(days=365)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n", "issue": "[Bug]: Waste type not showing for City of Doncaster (Green Bin Collection)\n### I Have A Problem With:\n\nA specific source, The integration in general\n\n### What's Your Problem\n\nFor Doncaster Council the Green bin collection only runs 9months out of 12. Next collection is in early March which can be viewed via the Council bin look-up calendar, but the waste type is not returned on the integration, it only seems to scrape 3 weeks in advance, can this be changed? 
\n\n### Source (if relevant)\n\ndoncaster_gov_uk\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: doncaster_gov_uk\r\n args:\r\n uprn: \"xxxxxxx\"\r\n\r\n - platform: waste_collection_schedule\r\n name: Bins\r\n details_format: appointment_types\r\n leadtime: 90\r\n# value_template: VALUE_TEMPLATE\r\n# date_template: DATE_TEMPLATE\r\n add_days_to: true\r\n# event_index: EVENT_INDEX\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nimport requests\nimport json\nfrom datetime import datetime, timedelta\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"City of Doncaster Council\"\nDESCRIPTION = \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\nURL = \"https://doncaster.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100050701118\"},\n \"Test_002\": {\"uprn\": \"100050753396\"},\n \"Test_003\": {\"uprn\": 100050699118},\n}\n\nICON_MAP = {\n \"GREEN\": \"mdi:leaf\",\n \"RECYCLING\": \"mdi:recycle\",\n \"BLACK\": \"mdi:trash-can\",\n \"BULKY\": \"mdi:fridge\",\n \"RE-USE\": \"mdi:sofa\",\n}\n\nREGEX_DATE = r\"\\(([0-9]{10})\"\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n\n\n def fetch(self):\n\n # Query needs start and end epoch dates \n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n start = (today - timedelta(weeks=3)).strftime(\"%s\")\n end = (today + timedelta(weeks=3)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n\n s = requests.Session()\n r = s.get(url)\n data = json.loads(r.text)\n\n entries = []\n\n for entry in data[\"slots\"]:\n waste_type = entry[\"title\"]\n waste_date = entry[\"end\"]\n epoch = re.findall(REGEX_DATE, waste_date)\n waste_date = datetime.fromtimestamp(int(epoch[0])).date()\n entries.append(\n Collection(\n date=waste_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py"}], "after_files": [{"content": "import json\nimport re\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"City of Doncaster Council\"\nDESCRIPTION = (\n \"Source for doncaster.gov.uk services for the City 
of Doncaster Council, UK.\"\n)\nURL = \"https://doncaster.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100050701118\"},\n \"Test_002\": {\"uprn\": \"100050753396\"},\n \"Test_003\": {\"uprn\": 100050699118},\n}\n\nICON_MAP = {\n \"GREEN\": \"mdi:leaf\",\n \"RECYCLING\": \"mdi:recycle\",\n \"BLACK\": \"mdi:trash-can\",\n \"BULKY\": \"mdi:fridge\",\n \"RE-USE\": \"mdi:sofa\",\n}\n\nREGEX_DATE = r\"\\(([0-9]{10})\"\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n\n def fetch(self):\n # Query needs start and end epoch dates\n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n start = (today - timedelta(days=365)).strftime(\"%s\")\n end = (today + timedelta(days=365)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n\n s = requests.Session()\n r = s.get(url)\n data = json.loads(r.text)\n\n entries = []\n\n for entry in data[\"slots\"]:\n waste_type = entry[\"title\"]\n waste_date = entry[\"end\"]\n epoch = re.findall(REGEX_DATE, waste_date)\n waste_date = datetime.fromtimestamp(int(epoch[0])).date()\n entries.append(\n Collection(\n date=waste_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py"}]}
| 1,340 | 435 |
gh_patches_debug_1937
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-23588
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ifft2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/numpy/fft.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
4 from ivy.func_wrapper import with_unsupported_dtypes
5
6
7 @to_ivy_arrays_and_back
8 def fft(a, n=None, axis=-1, norm=None):
9 if norm is None:
10 norm = "backward"
11 return ivy.fft(a, axis, norm=norm, n=n)
12
13
14 @to_ivy_arrays_and_back
15 def fft2(a, s=None, axes=(-2, -1), norm=None):
16 if norm is None:
17 norm = "backward"
18 return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
19
20
21 @to_ivy_arrays_and_back
22 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
23 def fftshift(x, axes=None, name=None):
24 shape = x.shape
25
26 if axes is None:
27 axes = tuple(range(x.ndim))
28 shifts = [(dim // 2) for dim in shape]
29 elif isinstance(axes, int):
30 shifts = shape[axes] // 2
31 else:
32 shifts = [shape[ax] // 2 for ax in axes]
33
34 roll = ivy.roll(x, shifts, axis=axes)
35
36 return roll
37
38
39 @to_ivy_arrays_and_back
40 def ifft(a, n=None, axis=-1, norm=None):
41 if norm is None:
42 norm = "backward"
43 return ivy.ifft(a, axis, norm=norm, n=n)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py
--- a/ivy/functional/frontends/jax/numpy/fft.py
+++ b/ivy/functional/frontends/jax/numpy/fft.py
@@ -41,3 +41,10 @@
if norm is None:
norm = "backward"
return ivy.ifft(a, axis, norm=norm, n=n)
+
+
+@to_ivy_arrays_and_back
+def ifft2(a, s=None, axes=(-2, -1), norm=None):
+ if norm is None:
+ norm = "backward"
+ return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
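The new wrapper mirrors the existing `fft2` one line for line. A rough usage sketch follows; the import path is read off the file path in this row, and the backend call is an assumption about the installed ivy version rather than documented API.

```python
# Rough usage sketch for the ifft2 frontend added by the patch.
import ivy
from ivy.functional.frontends.jax.numpy import fft as jax_fft

ivy.set_backend("numpy")                 # any supported backend
x = ivy.asarray([[1.0, 2.0], [3.0, 4.0]])
spectrum = jax_fft.fft2(x)               # existing forward transform
recovered = jax_fft.ifft2(spectrum)      # inverse added by the patch
print(recovered)
```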
|
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py\n--- a/ivy/functional/frontends/jax/numpy/fft.py\n+++ b/ivy/functional/frontends/jax/numpy/fft.py\n@@ -41,3 +41,10 @@\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n+\n+\n+@to_ivy_arrays_and_back\n+def ifft2(a, s=None, axes=(-2, -1), norm=None):\n+ if norm is None:\n+ norm = \"backward\"\n+ return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n", "issue": " ifft2\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\ndef fft2(a, s=None, axes=(-2, -1), norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\ndef fft2(a, s=None, axes=(-2, -1), norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\ndef ifft2(a, s=None, axes=(-2, -1), norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}]}
| 705 | 181 |
gh_patches_debug_37299
|
rasdani/github-patches
|
git_diff
|
evennia__evennia-1725
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
develop: Website not correctly logging in when logged in in-game
#### Brief summary of issue / Description of requested feature:
When logging in in-game, the website does not detect it, but if you try to log in you still get a warning saying that you are already logged in.
#### Steps to reproduce the issue / Reasons for adding feature:
1. Don't log into the website but open the web client
2. Log into the webclient as usual.
3. Go back to the website - you are not shown as logged in, but clicking `Log in` will still give you an error.
#### Error output / Expected result of feature
When logged into the game, this should be reflected by the web site. See closed #1063.
#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:
This is a regression, probably from changes in the session handling/sharing between client and website.
--- END ISSUE ---
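The regression described here is an ordering problem: if credentials are only synchronized after the view has rendered, the website page for the current request is built from a logged-out session, and only later requests see the login. A generic Django-style middleware sketch of the two positions — an editor's illustration, not Evennia's actual code.

```python
# Generic illustration of middleware ordering (not Evennia code).
class SyncBeforeViewMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        sync_credentials(request)               # before: affects this render
        response = self.get_response(request)   # view renders here
        # anything done after this point only helps subsequent requests
        return response


def sync_credentials(request):
    """Placeholder for whatever session/credential syncing is needed."""
```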
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evennia/web/utils/middleware.py`
Content:
```
1 from django.contrib.auth import authenticate, login
2 from evennia.accounts.models import AccountDB
3 from evennia.utils import logger
4
5 class SharedLoginMiddleware(object):
6 """
7 Handle the shared login between website and webclient.
8
9 """
10 def __init__(self, get_response):
11 # One-time configuration and initialization.
12 self.get_response = get_response
13
14 def __call__(self, request):
15 # Code to be executed for each request before
16 # the view (and later middleware) are called.
17
18 # Process view
19 response = self.get_response(request)
20
21 # Code to be executed for each request/response after
22 # the view is called.
23
24 # Synchronize credentials
25 self.make_shared_login(request)
26
27 # Return processed view
28 return response
29
30 @classmethod
31 def make_shared_login(cls, request):
32 csession = request.session
33 account = request.user
34 website_uid = csession.get("website_authenticated_uid", None)
35 webclient_uid = csession.get("webclient_authenticated_uid", None)
36
37 if not csession.session_key:
38 # this is necessary to build the sessid key
39 csession.save()
40
41 if account.is_authenticated():
42 # Logged into website
43 if not website_uid:
44 # fresh website login (just from login page)
45 csession["website_authenticated_uid"] = account.id
46 if webclient_uid is None:
47 # auto-login web client
48 csession["webclient_authenticated_uid"] = account.id
49
50 elif webclient_uid:
51 # Not logged into website, but logged into webclient
52 if not website_uid:
53 csession["website_authenticated_uid"] = account.id
54 account = AccountDB.objects.get(id=webclient_uid)
55 try:
56 # calls our custom authenticate, in web/utils/backend.py
57 authenticate(autologin=account)
58 login(request, account)
59 except AttributeError:
60 logger.log_trace()
```
Path: `evennia/web/webclient/views.py`
Content:
```
1
2 """
3 This contains a simple view for rendering the webclient
4 page and serve it eventual static content.
5
6 """
7 from __future__ import print_function
8 from django.shortcuts import render
9 from django.contrib.auth import login, authenticate
10
11 from evennia.accounts.models import AccountDB
12 from evennia.utils import logger
13
14
15 def _shared_login(request):
16 """
17 Handle the shared login between website and webclient.
18
19 """
20 csession = request.session
21 account = request.user
22 # these can have 3 values:
23 # None - previously unused (auto-login)
24 # False - actively logged out (don't auto-login)
25 # <uid> - logged in User/Account id
26 website_uid = csession.get("website_authenticated_uid", None)
27 webclient_uid = csession.get("webclient_authenticated_uid", None)
28
29 # check if user has authenticated to website
30 if not csession.session_key:
31 # this is necessary to build the sessid key
32 csession.save()
33
34 if webclient_uid:
35 # The webclient has previously registered a login to this browser_session
36 if not account.is_authenticated() and not website_uid:
37 try:
38 account = AccountDB.objects.get(id=webclient_uid)
39 except AccountDB.DoesNotExist:
40 # this can happen e.g. for guest accounts or deletions
41 csession["website_authenticated_uid"] = False
42 csession["webclient_authenticated_uid"] = False
43 return
44 try:
45 # calls our custom authenticate in web/utils/backends.py
46 account = authenticate(autologin=account)
47 login(request, account)
48 csession["website_authenticated_uid"] = webclient_uid
49 except AttributeError:
50 logger.log_trace()
51
52
53 def webclient(request):
54 """
55 Webclient page template loading.
56
57 """
58 # handle webclient-website shared login
59 _shared_login(request)
60
61 # make sure to store the browser session's hash so the webclient can get to it!
62 pagevars = {'browser_sessid': request.session.session_key}
63
64 return render(request, 'webclient.html', pagevars)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evennia/web/utils/middleware.py b/evennia/web/utils/middleware.py
--- a/evennia/web/utils/middleware.py
+++ b/evennia/web/utils/middleware.py
@@ -15,15 +15,16 @@
# Code to be executed for each request before
# the view (and later middleware) are called.
+ # Synchronize credentials between webclient and website
+ # Must be performed *before* rendering the view (issue #1723)
+ self.make_shared_login(request)
+
# Process view
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
- # Synchronize credentials
- self.make_shared_login(request)
-
# Return processed view
return response
diff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py
--- a/evennia/web/webclient/views.py
+++ b/evennia/web/webclient/views.py
@@ -12,52 +12,13 @@
from evennia.utils import logger
-def _shared_login(request):
- """
- Handle the shared login between website and webclient.
-
- """
- csession = request.session
- account = request.user
- # these can have 3 values:
- # None - previously unused (auto-login)
- # False - actively logged out (don't auto-login)
- # <uid> - logged in User/Account id
- website_uid = csession.get("website_authenticated_uid", None)
- webclient_uid = csession.get("webclient_authenticated_uid", None)
-
- # check if user has authenticated to website
- if not csession.session_key:
- # this is necessary to build the sessid key
- csession.save()
-
- if webclient_uid:
- # The webclient has previously registered a login to this browser_session
- if not account.is_authenticated() and not website_uid:
- try:
- account = AccountDB.objects.get(id=webclient_uid)
- except AccountDB.DoesNotExist:
- # this can happen e.g. for guest accounts or deletions
- csession["website_authenticated_uid"] = False
- csession["webclient_authenticated_uid"] = False
- return
- try:
- # calls our custom authenticate in web/utils/backends.py
- account = authenticate(autologin=account)
- login(request, account)
- csession["website_authenticated_uid"] = webclient_uid
- except AttributeError:
- logger.log_trace()
-
-
def webclient(request):
"""
Webclient page template loading.
"""
- # handle webclient-website shared login
- _shared_login(request)
-
+ # auto-login is now handled by evennia.web.utils.middleware
+
# make sure to store the browser session's hash so the webclient can get to it!
pagevars = {'browser_sessid': request.session.session_key}
|
{"golden_diff": "diff --git a/evennia/web/utils/middleware.py b/evennia/web/utils/middleware.py\n--- a/evennia/web/utils/middleware.py\n+++ b/evennia/web/utils/middleware.py\n@@ -15,15 +15,16 @@\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n+ # Synchronize credentials between webclient and website\n+ # Must be performed *before* rendering the view (issue #1723)\n+ self.make_shared_login(request)\n+ \n # Process view\n response = self.get_response(request)\n \n # Code to be executed for each request/response after\n # the view is called.\n \n- # Synchronize credentials\n- self.make_shared_login(request)\n- \n # Return processed view\n return response\n \ndiff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py\n--- a/evennia/web/webclient/views.py\n+++ b/evennia/web/webclient/views.py\n@@ -12,52 +12,13 @@\n from evennia.utils import logger\n \n \n-def _shared_login(request):\n- \"\"\"\n- Handle the shared login between website and webclient.\n-\n- \"\"\"\n- csession = request.session\n- account = request.user\n- # these can have 3 values:\n- # None - previously unused (auto-login)\n- # False - actively logged out (don't auto-login)\n- # <uid> - logged in User/Account id\n- website_uid = csession.get(\"website_authenticated_uid\", None)\n- webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n-\n- # check if user has authenticated to website\n- if not csession.session_key:\n- # this is necessary to build the sessid key\n- csession.save()\n-\n- if webclient_uid:\n- # The webclient has previously registered a login to this browser_session\n- if not account.is_authenticated() and not website_uid:\n- try:\n- account = AccountDB.objects.get(id=webclient_uid)\n- except AccountDB.DoesNotExist:\n- # this can happen e.g. for guest accounts or deletions\n- csession[\"website_authenticated_uid\"] = False\n- csession[\"webclient_authenticated_uid\"] = False\n- return\n- try:\n- # calls our custom authenticate in web/utils/backends.py\n- account = authenticate(autologin=account)\n- login(request, account)\n- csession[\"website_authenticated_uid\"] = webclient_uid\n- except AttributeError:\n- logger.log_trace()\n-\n-\n def webclient(request):\n \"\"\"\n Webclient page template loading.\n \n \"\"\"\n- # handle webclient-website shared login\n- _shared_login(request)\n-\n+ # auto-login is now handled by evennia.web.utils.middleware\n+ \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n", "issue": "develop: Website not correctly logging in when logged in in-game\n#### Brief summary of issue / Description of requested feature:\r\n\r\nWhen logging in in-game, the website does not detect it, but if you try to log in you still get a warning saying that you are already logged in.\r\n\r\n#### Steps to reproduce the issue / Reasons for adding feature:\r\n\r\n1. Don't log into the website but open the web client\r\n2. Log into the webclient as usual.\r\n3. Go back to the website - you are not shown as logged in, but clicking `Log in` will still give you an error. \r\n\r\n#### Error output / Expected result of feature\r\n\r\nWhen logged into the game, this should be reflected by the web site. See closed #1063. 
\r\n\r\n#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:\r\n\r\nThis is a regression, probably from changes in the session handling/sharing between client and website.\n", "before_files": [{"content": "from django.contrib.auth import authenticate, login\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\nclass SharedLoginMiddleware(object):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n def __init__(self, get_response):\n # One-time configuration and initialization.\n self.get_response = get_response\n \n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n # Process view\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n \n # Synchronize credentials\n self.make_shared_login(request)\n \n # Return processed view\n return response\n \n @classmethod\n def make_shared_login(cls, request):\n csession = request.session\n account = request.user\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n \n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n \n if account.is_authenticated():\n # Logged into website\n if not website_uid:\n # fresh website login (just from login page)\n csession[\"website_authenticated_uid\"] = account.id\n if webclient_uid is None:\n # auto-login web client\n csession[\"webclient_authenticated_uid\"] = account.id\n \n elif webclient_uid:\n # Not logged into website, but logged into webclient\n if not website_uid:\n csession[\"website_authenticated_uid\"] = account.id\n account = AccountDB.objects.get(id=webclient_uid)\n try:\n # calls our custom authenticate, in web/utils/backend.py\n authenticate(autologin=account)\n login(request, account)\n except AttributeError:\n logger.log_trace()", "path": "evennia/web/utils/middleware.py"}, {"content": "\n\"\"\"\nThis contains a simple view for rendering the webclient\npage and serve it eventual static content.\n\n\"\"\"\nfrom __future__ import print_function\nfrom django.shortcuts import render\nfrom django.contrib.auth import login, authenticate\n\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\n\ndef _shared_login(request):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n csession = request.session\n account = request.user\n # these can have 3 values:\n # None - previously unused (auto-login)\n # False - actively logged out (don't auto-login)\n # <uid> - logged in User/Account id\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n\n # check if user has authenticated to website\n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n\n if webclient_uid:\n # The webclient has previously registered a login to this browser_session\n if not account.is_authenticated() and not website_uid:\n try:\n account = AccountDB.objects.get(id=webclient_uid)\n except AccountDB.DoesNotExist:\n # this can happen e.g. 
for guest accounts or deletions\n csession[\"website_authenticated_uid\"] = False\n csession[\"webclient_authenticated_uid\"] = False\n return\n try:\n # calls our custom authenticate in web/utils/backends.py\n account = authenticate(autologin=account)\n login(request, account)\n csession[\"website_authenticated_uid\"] = webclient_uid\n except AttributeError:\n logger.log_trace()\n\n\ndef webclient(request):\n \"\"\"\n Webclient page template loading.\n\n \"\"\"\n # handle webclient-website shared login\n _shared_login(request)\n\n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n\n return render(request, 'webclient.html', pagevars)\n", "path": "evennia/web/webclient/views.py"}], "after_files": [{"content": "from django.contrib.auth import authenticate, login\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\nclass SharedLoginMiddleware(object):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n def __init__(self, get_response):\n # One-time configuration and initialization.\n self.get_response = get_response\n \n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n # Synchronize credentials between webclient and website\n # Must be performed *before* rendering the view (issue #1723)\n self.make_shared_login(request)\n \n # Process view\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n \n # Return processed view\n return response\n \n @classmethod\n def make_shared_login(cls, request):\n csession = request.session\n account = request.user\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n \n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n \n if account.is_authenticated():\n # Logged into website\n if not website_uid:\n # fresh website login (just from login page)\n csession[\"website_authenticated_uid\"] = account.id\n if webclient_uid is None:\n # auto-login web client\n csession[\"webclient_authenticated_uid\"] = account.id\n \n elif webclient_uid:\n # Not logged into website, but logged into webclient\n if not website_uid:\n csession[\"website_authenticated_uid\"] = account.id\n account = AccountDB.objects.get(id=webclient_uid)\n try:\n # calls our custom authenticate, in web/utils/backend.py\n authenticate(autologin=account)\n login(request, account)\n except AttributeError:\n logger.log_trace()", "path": "evennia/web/utils/middleware.py"}, {"content": "\n\"\"\"\nThis contains a simple view for rendering the webclient\npage and serve it eventual static content.\n\n\"\"\"\nfrom __future__ import print_function\nfrom django.shortcuts import render\nfrom django.contrib.auth import login, authenticate\n\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\n\ndef webclient(request):\n \"\"\"\n Webclient page template loading.\n\n \"\"\"\n # auto-login is now handled by evennia.web.utils.middleware\n \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n\n return render(request, 'webclient.html', pagevars)\n", "path": "evennia/web/webclient/views.py"}]}
| 1,570 | 674 |
gh_patches_debug_12647
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1672
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: rbenv: no such command `install'
fails: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420816191
passes: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420881311
The difference in the failing job is pre-commit 2.8.1 (passing is using 2.7.1). It seems similar to https://stackoverflow.com/questions/17618113/the-command-rbenv-install-is-missing... perhaps Travis doesn't include that particular bit.
failure log snippet:
```
$ pre-commit run --all-files
[INFO] Installing environment for https://github.com/chriskuehl/puppet-pre-commit-hooks.git.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: command: ('/bin/bash', '/home/travis/.cache/pre-commit/repoz146d2mg/rbenv-default/bin/rbenv', 'install', 'default')
return code: 1
expected return code: 0
stdout: (none)
stderr:
rbenv: no such command `install'
Check the log at /home/travis/.cache/pre-commit/pre-commit.log
```
--- END ISSUE ---
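For context on the error text: `install` is not a built-in rbenv command, it is supplied by the ruby-build plugin, which the source below only extracts when a specific version is requested — hence "no such command `install'" for the default version. A hypothetical Python check (not something pre-commit runs) that assumes an `rbenv` executable on PATH:

```python
# Hypothetical check: does this rbenv expose an `install` command?
import subprocess

result = subprocess.run(
    ["rbenv", "commands"], capture_output=True, text=True, check=False
)
print("install" in result.stdout.split())  # False without the ruby-build plugin
```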
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/ruby.py`
Content:
```
1 import contextlib
2 import functools
3 import os.path
4 import shutil
5 import tarfile
6 from typing import Generator
7 from typing import Sequence
8 from typing import Tuple
9
10 import pre_commit.constants as C
11 from pre_commit.envcontext import envcontext
12 from pre_commit.envcontext import PatchesT
13 from pre_commit.envcontext import UNSET
14 from pre_commit.envcontext import Var
15 from pre_commit.hook import Hook
16 from pre_commit.languages import helpers
17 from pre_commit.prefix import Prefix
18 from pre_commit.util import CalledProcessError
19 from pre_commit.util import clean_path_on_failure
20 from pre_commit.util import resource_bytesio
21
22 ENVIRONMENT_DIR = 'rbenv'
23 healthy = helpers.basic_healthy
24
25
26 @functools.lru_cache(maxsize=1)
27 def get_default_version() -> str:
28 if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):
29 return 'system'
30 else:
31 return C.DEFAULT
32
33
34 def get_env_patch(
35 venv: str,
36 language_version: str,
37 ) -> PatchesT:
38 patches: PatchesT = (
39 ('GEM_HOME', os.path.join(venv, 'gems')),
40 ('GEM_PATH', UNSET),
41 ('BUNDLE_IGNORE_CONFIG', '1'),
42 )
43 if language_version == 'system':
44 patches += (
45 (
46 'PATH', (
47 os.path.join(venv, 'gems', 'bin'), os.pathsep,
48 Var('PATH'),
49 ),
50 ),
51 )
52 else: # pragma: win32 no cover
53 patches += (
54 ('RBENV_ROOT', venv),
55 ('RBENV_VERSION', language_version),
56 (
57 'PATH', (
58 os.path.join(venv, 'gems', 'bin'), os.pathsep,
59 os.path.join(venv, 'shims'), os.pathsep,
60 os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),
61 ),
62 ),
63 )
64 return patches
65
66
67 @contextlib.contextmanager
68 def in_env(
69 prefix: Prefix,
70 language_version: str,
71 ) -> Generator[None, None, None]:
72 envdir = prefix.path(
73 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
74 )
75 with envcontext(get_env_patch(envdir, language_version)):
76 yield
77
78
79 def _extract_resource(filename: str, dest: str) -> None:
80 with resource_bytesio(filename) as bio:
81 with tarfile.open(fileobj=bio) as tf:
82 tf.extractall(dest)
83
84
85 def _install_rbenv(
86 prefix: Prefix,
87 version: str,
88 ) -> None: # pragma: win32 no cover
89 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
90
91 _extract_resource('rbenv.tar.gz', prefix.path('.'))
92 shutil.move(prefix.path('rbenv'), prefix.path(directory))
93
94 # Only install ruby-build if the version is specified
95 if version != C.DEFAULT:
96 plugins_dir = prefix.path(directory, 'plugins')
97 _extract_resource('ruby-download.tar.gz', plugins_dir)
98 _extract_resource('ruby-build.tar.gz', plugins_dir)
99
100
101 def _install_ruby(
102 prefix: Prefix,
103 version: str,
104 ) -> None: # pragma: win32 no cover
105 try:
106 helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))
107 except CalledProcessError: # pragma: no cover (usually find with download)
108 # Failed to download from mirror for some reason, build it instead
109 helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))
110
111
112 def install_environment(
113 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
114 ) -> None:
115 additional_dependencies = tuple(additional_dependencies)
116 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
117 with clean_path_on_failure(prefix.path(directory)):
118 if version != 'system': # pragma: win32 no cover
119 _install_rbenv(prefix, version)
120 with in_env(prefix, version):
121 # Need to call this before installing so rbenv's directories
122 # are set up
123 helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
124 # XXX: this will *always* fail if `version == C.DEFAULT`
125 _install_ruby(prefix, version)
126 # Need to call this after installing to set up the shims
127 helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
128
129 with in_env(prefix, version):
130 helpers.run_setup_cmd(
131 prefix, ('gem', 'build', *prefix.star('.gemspec')),
132 )
133 helpers.run_setup_cmd(
134 prefix,
135 (
136 'gem', 'install',
137 '--no-document', '--no-format-executable',
138 *prefix.star('.gem'), *additional_dependencies,
139 ),
140 )
141
142
143 def run_hook(
144 hook: Hook,
145 file_args: Sequence[str],
146 color: bool,
147 ) -> Tuple[int, bytes]:
148 with in_env(hook.prefix, hook.language_version):
149 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -121,8 +121,8 @@
# Need to call this before installing so rbenv's directories
# are set up
helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
- # XXX: this will *always* fail if `version == C.DEFAULT`
- _install_ruby(prefix, version)
+ if version != C.DEFAULT:
+ _install_ruby(prefix, version)
# Need to call this after installing to set up the shims
helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
|
{"golden_diff": "diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py\n--- a/pre_commit/languages/ruby.py\n+++ b/pre_commit/languages/ruby.py\n@@ -121,8 +121,8 @@\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n- # XXX: this will *always* fail if `version == C.DEFAULT`\n- _install_ruby(prefix, version)\n+ if version != C.DEFAULT:\n+ _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n", "issue": "bug: rbenv: no such command `install'\nfails: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420816191\r\npasses: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420881311\r\n\r\nThe difference in the failing job is pre-commit 2.8.1 (passing is using 2.7.1). It seems similar to https://stackoverflow.com/questions/17618113/the-command-rbenv-install-is-missing... perhaps Travis doesn't include that particular bit.\r\n\r\nfailure log snippet:\r\n```\r\n$ pre-commit run --all-files\r\n\r\n[INFO] Installing environment for https://github.com/chriskuehl/puppet-pre-commit-hooks.git.\r\n\r\n[INFO] Once installed this environment will be reused.\r\n\r\n[INFO] This may take a few minutes...\r\n\r\nAn unexpected error has occurred: CalledProcessError: command: ('/bin/bash', '/home/travis/.cache/pre-commit/repoz146d2mg/rbenv-default/bin/rbenv', 'install', 'default')\r\n\r\nreturn code: 1\r\n\r\nexpected return code: 0\r\n\r\nstdout: (none)\r\n\r\nstderr:\r\n\r\n rbenv: no such command `install'\r\n\r\n \r\n\r\nCheck the log at /home/travis/.cache/pre-commit/pre-commit.log\r\n```\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n ('RBENV_VERSION', language_version),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n 
yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n # XXX: this will *always* fail if `version == C.DEFAULT`\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/ruby.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n 
os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n ('RBENV_VERSION', language_version),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/ruby.py"}]}
| 1,984 | 166 |
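A brief editorial note on the pre-commit/rbenv record that ends above: the patch's key change is to skip `rbenv install` when no concrete Ruby version was requested, because the ruby-build plugin that provides the `install` subcommand is only bundled when a version is pinned. The sketch below is a simplified, standalone mirror of that control flow (the `DEFAULT` sentinel and the `run` callable are stand-ins, and the real code also tries `rbenv download` before falling back to `rbenv install`):

```python
DEFAULT = "default"  # stand-in for pre_commit.constants.DEFAULT


def setup_rbenv_env(version: str, run) -> None:
    """Simplified mirror of the patched install sequence (illustration only)."""
    run(("rbenv", "init", "-"))             # always: set up rbenv's directories
    if version != DEFAULT:                  # the fix: only install when a version is pinned
        run(("rbenv", "install", version))
    run(("rbenv", "rehash"))                # set up shims afterwards


setup_rbenv_env(DEFAULT, print)   # no `rbenv install` is issued -> no "no such command" error
setup_rbenv_env("2.7.2", print)   # a pinned interpreter would be installed
```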
gh_patches_debug_18069 | rasdani/github-patches | git_diff | microsoft__torchgeo-1647 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checkpoint saving not working as expected
### Description
After migrating to release 0.5.0 noticed that checkpoint saving is not working as expected.
## description
tried different configurations, e.g., `checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)`, for example when running 20-30 epochs of training.
after training was completed, I could not find the expected ckpt files; what was found was a single ckpt file from the first epoch only, in the wrong directory.
## severity
the bug is very limiting. for example, after hours of training a model, there is no way to load it from a checkpoint to run inference; the only chance to run inference was during the same run.
## expected behavior
using a given configuration, I expected to see:
- checkpoint files saved every N epochs
- the last epoch checkpoint file
- the checkpoints should have been saved to the given directory
## observed behavior
- after training several epochs, only the first checkpoint was saved.
- the single checkpoint was saved to another directory, under the logger output
## initial investigation
1. checkpoint callback created and training fit called
2. later, see image and call stack:
seems like c'tor called again with save_last=None

3. when saving later supposed to happen, the save_last is None:

4. last checkpoint saving is skipped
### Steps to reproduce
1. create a checkpoint callback and use different checkpoints saving parameters e.g., checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)
2. call trainer fit and run several epochs
3. check expected results:
- saving location as expected e.g., under C:\foo
- check last epoch checkpoint saved - must have last.ckpt
- check how many checkpoints were saved e.g., every 2 etc
### Version
torchgeo version 0.5.0, lightning version 2.0.9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/trainers/base.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Base classes for all :mod:`torchgeo` trainers."""
5
6 from abc import ABC, abstractmethod
7 from typing import Any
8
9 import lightning
10 from lightning.pytorch import LightningModule
11 from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint
12 from torch.optim import AdamW
13 from torch.optim.lr_scheduler import ReduceLROnPlateau
14
15
16 class BaseTask(LightningModule, ABC):
17 """Abstract base class for all TorchGeo trainers.
18
19 .. versionadded:: 0.5
20 """
21
22 #: Model to train.
23 model: Any
24
25 #: Performance metric to monitor in learning rate scheduler and callbacks.
26 monitor = "val_loss"
27
28 #: Whether the goal is to minimize or maximize the performance metric to monitor.
29 mode = "min"
30
31 def __init__(self) -> None:
32 """Initialize a new BaseTask instance."""
33 super().__init__()
34 self.save_hyperparameters()
35 self.configure_losses()
36 self.configure_metrics()
37 self.configure_models()
38
39 def configure_callbacks(self) -> list[Callback]:
40 """Initialize model-specific callbacks.
41
42 Returns:
43 List of callbacks to apply.
44 """
45 return [
46 ModelCheckpoint(monitor=self.monitor, mode=self.mode),
47 EarlyStopping(monitor=self.monitor, mode=self.mode),
48 ]
49
50 def configure_losses(self) -> None:
51 """Initialize the loss criterion."""
52
53 def configure_metrics(self) -> None:
54 """Initialize the performance metrics."""
55
56 @abstractmethod
57 def configure_models(self) -> None:
58 """Initialize the model."""
59
60 def configure_optimizers(
61 self,
62 ) -> "lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig":
63 """Initialize the optimizer and learning rate scheduler.
64
65 Returns:
66 Optimizer and learning rate scheduler.
67 """
68 optimizer = AdamW(self.parameters(), lr=self.hparams["lr"])
69 scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams["patience"])
70 return {
71 "optimizer": optimizer,
72 "lr_scheduler": {"scheduler": scheduler, "monitor": self.monitor},
73 }
74
75 def forward(self, *args: Any, **kwargs: Any) -> Any:
76 """Forward pass of the model.
77
78 Args:
79 args: Arguments to pass to model.
80 kwargs: Keyword arguments to pass to model.
81
82 Returns:
83 Output of the model.
84 """
85 return self.model(*args, **kwargs)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchgeo/trainers/base.py b/torchgeo/trainers/base.py
--- a/torchgeo/trainers/base.py
+++ b/torchgeo/trainers/base.py
@@ -8,7 +8,6 @@
import lightning
from lightning.pytorch import LightningModule
-from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint
from torch.optim import AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau
@@ -36,17 +35,6 @@
self.configure_metrics()
self.configure_models()
- def configure_callbacks(self) -> list[Callback]:
- """Initialize model-specific callbacks.
-
- Returns:
- List of callbacks to apply.
- """
- return [
- ModelCheckpoint(monitor=self.monitor, mode=self.mode),
- EarlyStopping(monitor=self.monitor, mode=self.mode),
- ]
-
def configure_losses(self) -> None:
"""Initialize the loss criterion."""
|
{"golden_diff": "diff --git a/torchgeo/trainers/base.py b/torchgeo/trainers/base.py\n--- a/torchgeo/trainers/base.py\n+++ b/torchgeo/trainers/base.py\n@@ -8,7 +8,6 @@\n \n import lightning\n from lightning.pytorch import LightningModule\n-from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint\n from torch.optim import AdamW\n from torch.optim.lr_scheduler import ReduceLROnPlateau\n \n@@ -36,17 +35,6 @@\n self.configure_metrics()\n self.configure_models()\n \n- def configure_callbacks(self) -> list[Callback]:\n- \"\"\"Initialize model-specific callbacks.\n-\n- Returns:\n- List of callbacks to apply.\n- \"\"\"\n- return [\n- ModelCheckpoint(monitor=self.monitor, mode=self.mode),\n- EarlyStopping(monitor=self.monitor, mode=self.mode),\n- ]\n-\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n", "issue": "Checkpoint saving not working as expected\n### Description\r\n\r\nAfter migrating to release 0.5.0 noticed that checkpoint saving is not working as expected.\r\n\r\n## description\r\ntried different configuration e.g., `checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)` for example when running 20-30 epochs for training a model.\r\nafter training was completed could not find the ckpt file. what was found was a single ckpt file of the first epoch only, in a wrong directory.\r\n\r\n## severance\r\nthe bug is very limiting. for example, after hours of training a model, there is no way to load the model from a checkpoint to run inference. the single shot to run inference was during the same run. \r\n\r\n## expected behavior\r\nusing a given configuration expected to see:\r\n- checkpoint files saved every number of epoch\r\n- the last epoch checkpoint file\r\n- the checkpoints should have been saved to the given directory\r\n\r\n## observed behavior\r\n- after training several epochs only the first was saved.\r\n- the single checkpoint were saved to another directory under the logger output\r\n\r\n## initial investigation\r\n1. checkpoint callback created and training fit called\r\n2. later, see image and call stack:\r\nseems like c'tor called again with save_last=None\r\n\r\n\r\n3. when saving later supposed to happen, the save_last is None:\r\n\r\n\r\n4. last checkpoint saving is skipped\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. create a checkpoint callback and use different checkpoints saving parameters e.g., checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)\r\n2. call trainer fit and run several epochs\r\n3. check expected results:\r\n- saving location as expected e.g., under C:\\foo \r\n- check last epoch checkpoint saved - must have last.ckpt\r\n- check how many checkpoints were saved e.g., every 2 etc\r\n\r\n### Version\r\n\r\ntorchgeo version 0.5.0, lightning version 2.0.9\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Base classes for all :mod:`torchgeo` trainers.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any\n\nimport lightning\nfrom lightning.pytorch import LightningModule\nfrom lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nclass BaseTask(LightningModule, ABC):\n \"\"\"Abstract base class for all TorchGeo trainers.\n\n .. 
versionadded:: 0.5\n \"\"\"\n\n #: Model to train.\n model: Any\n\n #: Performance metric to monitor in learning rate scheduler and callbacks.\n monitor = \"val_loss\"\n\n #: Whether the goal is to minimize or maximize the performance metric to monitor.\n mode = \"min\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new BaseTask instance.\"\"\"\n super().__init__()\n self.save_hyperparameters()\n self.configure_losses()\n self.configure_metrics()\n self.configure_models()\n\n def configure_callbacks(self) -> list[Callback]:\n \"\"\"Initialize model-specific callbacks.\n\n Returns:\n List of callbacks to apply.\n \"\"\"\n return [\n ModelCheckpoint(monitor=self.monitor, mode=self.mode),\n EarlyStopping(monitor=self.monitor, mode=self.mode),\n ]\n\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n\n def configure_metrics(self) -> None:\n \"\"\"Initialize the performance metrics.\"\"\"\n\n @abstractmethod\n def configure_models(self) -> None:\n \"\"\"Initialize the model.\"\"\"\n\n def configure_optimizers(\n self,\n ) -> \"lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig\":\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n Optimizer and learning rate scheduler.\n \"\"\"\n optimizer = AdamW(self.parameters(), lr=self.hparams[\"lr\"])\n scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams[\"patience\"])\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": scheduler, \"monitor\": self.monitor},\n }\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n args: Arguments to pass to model.\n kwargs: Keyword arguments to pass to model.\n\n Returns:\n Output of the model.\n \"\"\"\n return self.model(*args, **kwargs)\n", "path": "torchgeo/trainers/base.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Base classes for all :mod:`torchgeo` trainers.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any\n\nimport lightning\nfrom lightning.pytorch import LightningModule\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nclass BaseTask(LightningModule, ABC):\n \"\"\"Abstract base class for all TorchGeo trainers.\n\n .. 
versionadded:: 0.5\n \"\"\"\n\n #: Model to train.\n model: Any\n\n #: Performance metric to monitor in learning rate scheduler and callbacks.\n monitor = \"val_loss\"\n\n #: Whether the goal is to minimize or maximize the performance metric to monitor.\n mode = \"min\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new BaseTask instance.\"\"\"\n super().__init__()\n self.save_hyperparameters()\n self.configure_losses()\n self.configure_metrics()\n self.configure_models()\n\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n\n def configure_metrics(self) -> None:\n \"\"\"Initialize the performance metrics.\"\"\"\n\n @abstractmethod\n def configure_models(self) -> None:\n \"\"\"Initialize the model.\"\"\"\n\n def configure_optimizers(\n self,\n ) -> \"lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig\":\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n Optimizer and learning rate scheduler.\n \"\"\"\n optimizer = AdamW(self.parameters(), lr=self.hparams[\"lr\"])\n scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams[\"patience\"])\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": scheduler, \"monitor\": self.monitor},\n }\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n args: Arguments to pass to model.\n kwargs: Keyword arguments to pass to model.\n\n Returns:\n Output of the model.\n \"\"\"\n return self.model(*args, **kwargs)\n", "path": "torchgeo/trainers/base.py"}]}
| 1,494 | 206 |
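An editorial note on the torchgeo record above: the golden diff removes `configure_callbacks` from `BaseTask`, so the task no longer injects its own `ModelCheckpoint`, which is what appeared to shadow the user-supplied callback in the issue. Below is a hedged sketch of the usage the reporter describes, which this change is meant to respect; the directory, epoch count, and the commented-out `task`/`datamodule` objects are placeholders, not taken from the record:

```python
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import ModelCheckpoint

# User-supplied checkpointing, as in the issue report.
checkpoint_callback = ModelCheckpoint(
    monitor="val_loss",
    dirpath="checkpoints/",   # placeholder directory
    save_last=True,           # expect last.ckpt to be written here
    every_n_epochs=1,
    save_top_k=1,
)

trainer = Trainer(max_epochs=30, callbacks=[checkpoint_callback])
# trainer.fit(task, datamodule=datamodule)  # `task` and `datamodule` come from the user's project
```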
gh_patches_debug_14240 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-87 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
@ character
If I type @ in a chat I get this (using https://github.com/leandrotoledo/python-telegram-bot/blob/master/examples/echobot.py):
TypeError: b'hola @honguitobot' is not JSON serializable
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/echobot.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # Simple Bot to reply Telegram messages
4 # Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see [http://www.gnu.org/licenses/].
18
19
20 import logging
21 import telegram
22
23
24 LAST_UPDATE_ID = None
25
26
27 def main():
28 global LAST_UPDATE_ID
29
30 logging.basicConfig(
31 format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
32
33 # Telegram Bot Authorization Token
34 bot = telegram.Bot('TOKEN')
35
36 # This will be our global variable to keep the latest update_id when requesting
37 # for updates. It starts with the latest update_id if available.
38 try:
39 LAST_UPDATE_ID = bot.getUpdates()[-1].update_id
40 except IndexError:
41 LAST_UPDATE_ID = None
42
43 while True:
44 echo(bot)
45
46
47 def echo(bot):
48 global LAST_UPDATE_ID
49
50 # Request updates after the last updated_id
51 for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):
52 # chat_id is required to reply any message
53 chat_id = update.message.chat_id
54 message = update.message.text.encode('utf-8')
55
56 if (message):
57 # Reply the message
58 bot.sendMessage(chat_id=chat_id,
59 text=message)
60
61 # Updates global offset to get the new updates
62 LAST_UPDATE_ID = update.update_id + 1
63
64
65 if __name__ == '__main__':
66 main()
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/echobot.py b/examples/echobot.py
--- a/examples/echobot.py
+++ b/examples/echobot.py
@@ -51,12 +51,12 @@
for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):
# chat_id is required to reply any message
chat_id = update.message.chat_id
- message = update.message.text.encode('utf-8')
+ reply_text = update.message.text
- if (message):
+ if (reply_text):
# Reply the message
bot.sendMessage(chat_id=chat_id,
- text=message)
+ text=reply_text)
# Updates global offset to get the new updates
LAST_UPDATE_ID = update.update_id + 1
|
{"golden_diff": "diff --git a/examples/echobot.py b/examples/echobot.py\n--- a/examples/echobot.py\n+++ b/examples/echobot.py\n@@ -51,12 +51,12 @@\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n- message = update.message.text.encode('utf-8')\n+ reply_text = update.message.text\n \n- if (message):\n+ if (reply_text):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n- text=message)\n+ text=reply_text)\n \n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n", "issue": "@ character\nIf I type @ in a chat I got this. (using https://github.com/leandrotoledo/python-telegram-bot/blob/master/examples/echobot.py)\n\nTypeError: b'hola @honguitobot' is not JSON serializable\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Simple Bot to reply Telegram messages\n# Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\n\nimport logging\nimport telegram\n\n\nLAST_UPDATE_ID = None\n\n\ndef main():\n global LAST_UPDATE_ID\n\n logging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Telegram Bot Authorization Token\n bot = telegram.Bot('TOKEN')\n\n # This will be our global variable to keep the latest update_id when requesting\n # for updates. It starts with the latest update_id if available.\n try:\n LAST_UPDATE_ID = bot.getUpdates()[-1].update_id\n except IndexError:\n LAST_UPDATE_ID = None\n\n while True:\n echo(bot)\n\n\ndef echo(bot):\n global LAST_UPDATE_ID\n\n # Request updates after the last updated_id\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n message = update.message.text.encode('utf-8')\n\n if (message):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n text=message)\n\n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/echobot.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Simple Bot to reply Telegram messages\n# Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\n\nimport logging\nimport telegram\n\n\nLAST_UPDATE_ID = None\n\n\ndef main():\n global LAST_UPDATE_ID\n\n logging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Telegram Bot Authorization Token\n bot = telegram.Bot('TOKEN')\n\n # This will be our global variable to keep the latest update_id when requesting\n # for updates. It starts with the latest update_id if available.\n try:\n LAST_UPDATE_ID = bot.getUpdates()[-1].update_id\n except IndexError:\n LAST_UPDATE_ID = None\n\n while True:\n echo(bot)\n\n\ndef echo(bot):\n global LAST_UPDATE_ID\n\n # Request updates after the last updated_id\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n reply_text = update.message.text\n\n if (reply_text):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n text=reply_text)\n\n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/echobot.py"}]}
| 896 | 170 |
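A note on the echobot record above: the reported `TypeError` comes from encoding the incoming text to `bytes` and then handing it back to the API, and `bytes` is not JSON serializable under Python 3; the patch simply keeps the original `str`. The stdlib-only snippet below reproduces the difference:

```python
import json

text = "hola @honguitobot"

print(json.dumps({"text": text}))             # str serializes fine
try:
    json.dumps({"text": text.encode("utf-8")})
except TypeError as exc:                      # bytes are rejected, as in the report
    print(exc)
```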
gh_patches_debug_9136 | rasdani/github-patches | git_diff | bokeh__bokeh-5537 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace `@$(...)` with `@$el.find(...)`
Unnecessary alias. Often people forget that `@$(...) != $(...)`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/docs/user_guide/examples/extensions_putting_together.py`
Content:
```
1 from bokeh.core.properties import String, Instance
2 from bokeh.models import LayoutDOM, Slider
3
4 CODE ="""
5 import * as _ from "underscore"
6 import * as $ from "jquery"
7
8 import * as p from "core/properties"
9 import {LayoutDOM, LayoutDOMView} from "models/layouts/layout_dom"
10
11 export class CustomView extends LayoutDOMView
12
13 initialize: (options) ->
14 super(options)
15
16 @render()
17
18 # Set Backbone listener so that when the Bokeh slider has a change
19 # event, we can process the new data
20 @listenTo(@model.slider, 'change', () => @render())
21
22 render: () ->
23 # Backbone Views create <div> elements by default, accessible as @$el.
24 # Many Bokeh views ignore this default <div>, and instead do things
25 # like draw to the HTML canvas. In this case though, we change the
26 # contents of the <div>, based on the current slider value.
27 @$el.html("<h1>#{ @model.text }: #{ @model.slider.value }</h1>")
28 @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })
29
30 export class Custom extends LayoutDOM
31
32 # If there is an associated view, this is boilerplate.
33 default_view: CustomView
34
35 # The ``type`` class attribute should generally match exactly the name
36 # of the corresponding Python class.
37 type: "Custom"
38
39 # The @define block adds corresponding "properties" to the JS model. These
40 # should basically line up 1-1 with the Python model class. Most property
41 # types have counterparts, e.g. bokeh.core.properties.String will be
42 # p.String in the JS implementation. Where the JS type system is not yet
43 # as rich, you can use p.Any as a "wildcard" property type.
44 @define {
45 text: [ p.String ]
46 slider: [ p.Any ]
47 }
48 """
49
50 class Custom(LayoutDOM):
51
52 __implementation__ = CODE
53
54 text = String(default="Custom text")
55
56 slider = Instance(Slider)
57
58 from bokeh.io import show
59
60 from bokeh.layouts import column
61 from bokeh.models import Slider
62
63 slider = Slider(start=0, end=10, step=0.1, value=0, title="value")
64
65 custom = Custom(text="Special Slider Display", slider=slider)
66
67 layout = column(slider, custom)
68
69 show(layout)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py
--- a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py
+++ b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py
@@ -25,7 +25,7 @@
# like draw to the HTML canvas. In this case though, we change the
# contents of the <div>, based on the current slider value.
@$el.html("<h1>#{ @model.text }: #{ @model.slider.value }</h1>")
- @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })
+ @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })
export class Custom extends LayoutDOM
|
{"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n--- a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n+++ b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n@@ -25,7 +25,7 @@\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n- @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n+ @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n \n export class Custom extends LayoutDOM\n", "issue": "Replace `@$(...)` with `@$el.find(...)`\nUnnecessary alias. Often people forget that `@$(...) != $(...)`.\n", "before_files": [{"content": "from bokeh.core.properties import String, Instance\nfrom bokeh.models import LayoutDOM, Slider\n\nCODE =\"\"\"\nimport * as _ from \"underscore\"\nimport * as $ from \"jquery\"\n\nimport * as p from \"core/properties\"\nimport {LayoutDOM, LayoutDOMView} from \"models/layouts/layout_dom\"\n\nexport class CustomView extends LayoutDOMView\n\n initialize: (options) ->\n super(options)\n\n @render()\n\n # Set Backbone listener so that when the Bokeh slider has a change\n # event, we can process the new data\n @listenTo(@model.slider, 'change', () => @render())\n\n render: () ->\n # Backbone Views create <div> elements by default, accessible as @$el.\n # Many Bokeh views ignore this default <div>, and instead do things\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n\nexport class Custom extends LayoutDOM\n\n # If there is an associated view, this is boilerplate.\n default_view: CustomView\n\n # The ``type`` class attribute should generally match exactly the name\n # of the corresponding Python class.\n type: \"Custom\"\n\n # The @define block adds corresponding \"properties\" to the JS model. These\n # should basically line up 1-1 with the Python model class. Most property\n # types have counterparts, e.g. bokeh.core.properties.String will be\n # p.String in the JS implementation. 
Where the JS type system is not yet\n # as rich, you can use p.Any as a \"wildcard\" property type.\n @define {\n text: [ p.String ]\n slider: [ p.Any ]\n }\n\"\"\"\n\nclass Custom(LayoutDOM):\n\n __implementation__ = CODE\n\n text = String(default=\"Custom text\")\n\n slider = Instance(Slider)\n\nfrom bokeh.io import show\n\nfrom bokeh.layouts import column\nfrom bokeh.models import Slider\n\nslider = Slider(start=0, end=10, step=0.1, value=0, title=\"value\")\n\ncustom = Custom(text=\"Special Slider Display\", slider=slider)\n\nlayout = column(slider, custom)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/extensions_putting_together.py"}], "after_files": [{"content": "from bokeh.core.properties import String, Instance\nfrom bokeh.models import LayoutDOM, Slider\n\nCODE =\"\"\"\nimport * as _ from \"underscore\"\nimport * as $ from \"jquery\"\n\nimport * as p from \"core/properties\"\nimport {LayoutDOM, LayoutDOMView} from \"models/layouts/layout_dom\"\n\nexport class CustomView extends LayoutDOMView\n\n initialize: (options) ->\n super(options)\n\n @render()\n\n # Set Backbone listener so that when the Bokeh slider has a change\n # event, we can process the new data\n @listenTo(@model.slider, 'change', () => @render())\n\n render: () ->\n # Backbone Views create <div> elements by default, accessible as @$el.\n # Many Bokeh views ignore this default <div>, and instead do things\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n\nexport class Custom extends LayoutDOM\n\n # If there is an associated view, this is boilerplate.\n default_view: CustomView\n\n # The ``type`` class attribute should generally match exactly the name\n # of the corresponding Python class.\n type: \"Custom\"\n\n # The @define block adds corresponding \"properties\" to the JS model. These\n # should basically line up 1-1 with the Python model class. Most property\n # types have counterparts, e.g. bokeh.core.properties.String will be\n # p.String in the JS implementation. Where the JS type system is not yet\n # as rich, you can use p.Any as a \"wildcard\" property type.\n @define {\n text: [ p.String ]\n slider: [ p.Any ]\n }\n\"\"\"\n\nclass Custom(LayoutDOM):\n\n __implementation__ = CODE\n\n text = String(default=\"Custom text\")\n\n slider = Instance(Slider)\n\nfrom bokeh.io import show\n\nfrom bokeh.layouts import column\nfrom bokeh.models import Slider\n\nslider = Slider(start=0, end=10, step=0.1, value=0, title=\"value\")\n\ncustom = Custom(text=\"Special Slider Display\", slider=slider)\n\nlayout = column(slider, custom)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/extensions_putting_together.py"}]}
| 976 | 207 |
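The point the bokeh record above makes (a lookup scoped to the view's own element versus a document-wide one) can be made concrete with a small standard-library Python sketch; this is only an analogy for Backbone's `@$el.find(...)` versus a global `$(...)`, not part of the Bokeh example itself, and the markup is invented:

```python
import xml.etree.ElementTree as ET

doc = ET.fromstring(
    "<div><div id='view'><h1>inside</h1></div><h1>outside</h1></div>"
)
view_el = doc.find(".//div[@id='view']")

print([h.text for h in view_el.findall(".//h1")])  # scoped to the view's element: ['inside']
print([h.text for h in doc.findall(".//h1")])      # whole document: ['inside', 'outside']
```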
gh_patches_debug_26190 | rasdani/github-patches | git_diff | numpy__numpy-13320 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUILD: what is generate_mtrand_c.py ?
xref #13163
Does anyone know what is ['numpy/random/mtrand/generate_mtrand_c.py'](https://github.com/numpy/numpy/blob/v1.16.2/numpy/random/mtrand/generate_mtrand_c.py) and why it is in ['MANIFEST.in'](https://github.com/numpy/numpy/blob/v1.16.2/MANIFEST.in#L11) ? Is it used by some downstream package? It does not seem to be used in [creating the wheels](https://github.com/MacPython/numpy-wheels)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/random/mtrand/generate_mtrand_c.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import division, absolute_import, print_function
3
4 import sys
5 import re
6 import os
7
8 unused_internal_funcs = ['__Pyx_PrintItem',
9 '__Pyx_PrintNewline',
10 '__Pyx_ReRaise',
11 #'__Pyx_GetExcValue',
12 '__Pyx_ArgTypeTest',
13 '__Pyx_SetVtable',
14 '__Pyx_GetVtable',
15 '__Pyx_CreateClass']
16
17 if __name__ == '__main__':
18 # Use cython here so that long docstrings are broken up.
19 # This is needed for some VC++ compilers.
20 os.system('cython mtrand.pyx')
21 mtrand_c = open('mtrand.c', 'r')
22 processed = open('mtrand_pp.c', 'w')
23 unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'
24 uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/')
25 linepat = re.compile(r'/\* ".*/mtrand.pyx":')
26 for linenum, line in enumerate(mtrand_c):
27 m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]',
28 line)
29 if m:
30 line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])
31 m = uifpat.match(line)
32 if m:
33 line = ''
34 m = re.search(unused_funcs_str, line)
35 if m:
36 print("%s was declared unused, but is used at line %d" % (m.group(),
37 linenum+1), file=sys.stderr)
38 line = linepat.sub(r'/* "mtrand.pyx":', line)
39 processed.write(line)
40 mtrand_c.close()
41 processed.close()
42 os.rename('mtrand_pp.c', 'mtrand.c')
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py
deleted file mode 100644
--- a/numpy/random/mtrand/generate_mtrand_c.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-import sys
-import re
-import os
-
-unused_internal_funcs = ['__Pyx_PrintItem',
- '__Pyx_PrintNewline',
- '__Pyx_ReRaise',
- #'__Pyx_GetExcValue',
- '__Pyx_ArgTypeTest',
- '__Pyx_SetVtable',
- '__Pyx_GetVtable',
- '__Pyx_CreateClass']
-
-if __name__ == '__main__':
- # Use cython here so that long docstrings are broken up.
- # This is needed for some VC++ compilers.
- os.system('cython mtrand.pyx')
- mtrand_c = open('mtrand.c', 'r')
- processed = open('mtrand_pp.c', 'w')
- unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'
- uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/')
- linepat = re.compile(r'/\* ".*/mtrand.pyx":')
- for linenum, line in enumerate(mtrand_c):
- m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]',
- line)
- if m:
- line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])
- m = uifpat.match(line)
- if m:
- line = ''
- m = re.search(unused_funcs_str, line)
- if m:
- print("%s was declared unused, but is used at line %d" % (m.group(),
- linenum+1), file=sys.stderr)
- line = linepat.sub(r'/* "mtrand.pyx":', line)
- processed.write(line)
- mtrand_c.close()
- processed.close()
- os.rename('mtrand_pp.c', 'mtrand.c')
|
{"golden_diff": "diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py\ndeleted file mode 100644\n--- a/numpy/random/mtrand/generate_mtrand_c.py\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-#!/usr/bin/env python\n-from __future__ import division, absolute_import, print_function\n-\n-import sys\n-import re\n-import os\n-\n-unused_internal_funcs = ['__Pyx_PrintItem',\n- '__Pyx_PrintNewline',\n- '__Pyx_ReRaise',\n- #'__Pyx_GetExcValue',\n- '__Pyx_ArgTypeTest',\n- '__Pyx_SetVtable',\n- '__Pyx_GetVtable',\n- '__Pyx_CreateClass']\n-\n-if __name__ == '__main__':\n- # Use cython here so that long docstrings are broken up.\n- # This is needed for some VC++ compilers.\n- os.system('cython mtrand.pyx')\n- mtrand_c = open('mtrand.c', 'r')\n- processed = open('mtrand_pp.c', 'w')\n- unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'\n- uifpat = re.compile(r'static \\w+ \\*?'+unused_funcs_str+r'.*/\\*proto\\*/')\n- linepat = re.compile(r'/\\* \".*/mtrand.pyx\":')\n- for linenum, line in enumerate(mtrand_c):\n- m = re.match(r'^(\\s+arrayObject\\w*\\s*=\\s*[(])[(]PyObject\\s*[*][)]',\n- line)\n- if m:\n- line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])\n- m = uifpat.match(line)\n- if m:\n- line = ''\n- m = re.search(unused_funcs_str, line)\n- if m:\n- print(\"%s was declared unused, but is used at line %d\" % (m.group(),\n- linenum+1), file=sys.stderr)\n- line = linepat.sub(r'/* \"mtrand.pyx\":', line)\n- processed.write(line)\n- mtrand_c.close()\n- processed.close()\n- os.rename('mtrand_pp.c', 'mtrand.c')\n", "issue": "BUILD: what is generate_mtrand_c.py ?\nxref #13163 \r\n\r\nDoes anyone know what is ['numpy/random/mtrand/generate_mtrand_c.py'](https://github.com/numpy/numpy/blob/v1.16.2/numpy/random/mtrand/generate_mtrand_c.py) and why it is in ['MANIFEST.in'](https://github.com/numpy/numpy/blob/v1.16.2/MANIFEST.in#L11) ? Is it used by some downstream package? It does not seem to be used in [creating the wheels](https://github.com/MacPython/numpy-wheels)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nimport re\nimport os\n\nunused_internal_funcs = ['__Pyx_PrintItem',\n '__Pyx_PrintNewline',\n '__Pyx_ReRaise',\n #'__Pyx_GetExcValue',\n '__Pyx_ArgTypeTest',\n '__Pyx_SetVtable',\n '__Pyx_GetVtable',\n '__Pyx_CreateClass']\n\nif __name__ == '__main__':\n # Use cython here so that long docstrings are broken up.\n # This is needed for some VC++ compilers.\n os.system('cython mtrand.pyx')\n mtrand_c = open('mtrand.c', 'r')\n processed = open('mtrand_pp.c', 'w')\n unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'\n uifpat = re.compile(r'static \\w+ \\*?'+unused_funcs_str+r'.*/\\*proto\\*/')\n linepat = re.compile(r'/\\* \".*/mtrand.pyx\":')\n for linenum, line in enumerate(mtrand_c):\n m = re.match(r'^(\\s+arrayObject\\w*\\s*=\\s*[(])[(]PyObject\\s*[*][)]',\n line)\n if m:\n line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])\n m = uifpat.match(line)\n if m:\n line = ''\n m = re.search(unused_funcs_str, line)\n if m:\n print(\"%s was declared unused, but is used at line %d\" % (m.group(),\n linenum+1), file=sys.stderr)\n line = linepat.sub(r'/* \"mtrand.pyx\":', line)\n processed.write(line)\n mtrand_c.close()\n processed.close()\n os.rename('mtrand_pp.c', 'mtrand.c')\n", "path": "numpy/random/mtrand/generate_mtrand_c.py"}], "after_files": [{"content": null, "path": "numpy/random/mtrand/generate_mtrand_c.py"}]}
| 895 | 522 |
gh_patches_debug_36488 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2871 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider rubios is broken
During the global build at 2021-05-26-14-42-23, spider **rubios** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rubios.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/rubios.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class RubiosSpider(scrapy.Spider):
9 name = "rubios"
10 item_attributes = { 'brand': "Rubio's" }
11 allowed_domains = ['rubios.com']
12 start_urls = (
13 'https://www.rubios.com/sitemap.xml',
14 )
15
16 def parse(self, response):
17 response.selector.remove_namespaces()
18 city_urls = response.xpath('//url/loc/text()').extract()
19 regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+')
20 for path in city_urls:
21 if re.search(regex, path):
22 yield scrapy.Request(
23 path.strip(),
24 callback=self.parse_store,
25 )
26
27 def parse_store(self, response):
28
29 properties = {
30 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
31 'ref': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
32 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
33 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
34 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
35 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
36 'phone': response.xpath('//span[@itemprop="telephone"]/a/text()').extract_first(),
37 'website': response.url,
38 'opening_hours': "".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),
39 'lon': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[0]),
40 'lat': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[1]),
41 }
42
43 yield GeojsonPointItem(**properties)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/rubios.py b/locations/spiders/rubios.py
--- a/locations/spiders/rubios.py
+++ b/locations/spiders/rubios.py
@@ -16,7 +16,7 @@
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
- regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+')
+ regex = re.compile(r'http\S+rubios.com/restaurant-locations/\S+/\S+/\S+')
for path in city_urls:
if re.search(regex, path):
yield scrapy.Request(
@@ -27,17 +27,18 @@
def parse_store(self, response):
properties = {
- 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
- 'ref': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
- 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
- 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
- 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
- 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
- 'phone': response.xpath('//span[@itemprop="telephone"]/a/text()').extract_first(),
+ 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
+ 'ref': response.url,
+ 'addr_full': response.xpath('//meta[@property="og:street_address"]/@content').extract_first(),
+ 'city': response.xpath('//meta[@property="og:locality"]/@content').extract_first(),
+ 'state': response.xpath('//meta[@property="og:region"]/@content').extract_first(),
+ 'postcode': response.xpath('//meta[@property="og:postal_code"]/@content').extract_first(),
+ 'country': response.xpath('//meta[@property="og:country_name"]/@content').extract_first(),
+ 'phone': response.xpath('//@href[contains(.,"tel:")]').extract_first().replace('tel:', ''),
'website': response.url,
- 'opening_hours': "".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),
- 'lon': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[0]),
- 'lat': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[1]),
+ 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()),
+ 'lon': response.xpath('//meta[@property="og:longitude"]/@content').extract_first(),
+ 'lat': response.xpath('//meta[@property="og:latitude"]/@content').extract_first(),
}
yield GeojsonPointItem(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/rubios.py b/locations/spiders/rubios.py\n--- a/locations/spiders/rubios.py\n+++ b/locations/spiders/rubios.py\n@@ -16,7 +16,7 @@\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n- regex = re.compile(r'http\\S+rubios.com/store-locations/\\S+/\\S+/\\S+')\n+ regex = re.compile(r'http\\S+rubios.com/restaurant-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n@@ -27,17 +27,18 @@\n def parse_store(self, response):\n \n properties = {\n- 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n- 'ref': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n- 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first(),\n- 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n- 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n- 'phone': response.xpath('//span[@itemprop=\"telephone\"]/a/text()').extract_first(),\n+ 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n+ 'ref': response.url,\n+ 'addr_full': response.xpath('//meta[@property=\"og:street_address\"]/@content').extract_first(),\n+ 'city': response.xpath('//meta[@property=\"og:locality\"]/@content').extract_first(),\n+ 'state': response.xpath('//meta[@property=\"og:region\"]/@content').extract_first(),\n+ 'postcode': response.xpath('//meta[@property=\"og:postal_code\"]/@content').extract_first(),\n+ 'country': response.xpath('//meta[@property=\"og:country_name\"]/@content').extract_first(),\n+ 'phone': response.xpath('//@href[contains(.,\"tel:\")]').extract_first().replace('tel:', ''),\n 'website': response.url,\n- 'opening_hours': \"\".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),\n- 'lon': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[0]),\n- 'lat': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[1]),\n+ 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()),\n+ 'lon': response.xpath('//meta[@property=\"og:longitude\"]/@content').extract_first(),\n+ 'lat': response.xpath('//meta[@property=\"og:latitude\"]/@content').extract_first(),\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider rubios is broken\nDuring the global build at 2021-05-26-14-42-23, spider **rubios** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rubios.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass RubiosSpider(scrapy.Spider):\n name = \"rubios\"\n item_attributes = { 'brand': \"Rubio's\" }\n allowed_domains = ['rubios.com']\n start_urls = (\n 'https://www.rubios.com/sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n regex = 
re.compile(r'http\\S+rubios.com/store-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n 'ref': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first(),\n 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/a/text()').extract_first(),\n 'website': response.url,\n 'opening_hours': \"\".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),\n 'lon': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[0]),\n 'lat': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[1]),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/rubios.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass RubiosSpider(scrapy.Spider):\n name = \"rubios\"\n item_attributes = { 'brand': \"Rubio's\" }\n allowed_domains = ['rubios.com']\n start_urls = (\n 'https://www.rubios.com/sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n regex = re.compile(r'http\\S+rubios.com/restaurant-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'ref': response.url,\n 'addr_full': response.xpath('//meta[@property=\"og:street_address\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@property=\"og:locality\"]/@content').extract_first(),\n 'state': response.xpath('//meta[@property=\"og:region\"]/@content').extract_first(),\n 'postcode': response.xpath('//meta[@property=\"og:postal_code\"]/@content').extract_first(),\n 'country': response.xpath('//meta[@property=\"og:country_name\"]/@content').extract_first(),\n 'phone': response.xpath('//@href[contains(.,\"tel:\")]').extract_first().replace('tel:', ''),\n 'website': response.url,\n 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()),\n 'lon': response.xpath('//meta[@property=\"og:longitude\"]/@content').extract_first(),\n 'lat': response.xpath('//meta[@property=\"og:latitude\"]/@content').extract_first(),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/rubios.py"}]}
| 966 | 683 |
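A quick note on the alltheplaces record above: the rewritten spider reads Open Graph `<meta>` tags instead of the removed `itemprop` markup. The sketch below shows how those XPath expressions behave against a minimal page; the HTML values are invented for illustration, and `Selector` is Scrapy's own selector class:

```python
from scrapy import Selector

html = """
<html><head>
  <meta property="og:title" content="Rubio's - Example Location" />
  <meta property="og:latitude" content="32.7157" />
  <meta property="og:longitude" content="-117.1611" />
</head><body></body></html>
"""

sel = Selector(text=html)
print(sel.xpath('//meta[@property="og:title"]/@content').extract_first())
print(sel.xpath('//meta[@property="og:latitude"]/@content').extract_first())
print(sel.xpath('//meta[@property="og:longitude"]/@content').extract_first())
```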
gh_patches_debug_63976 | rasdani/github-patches | git_diff | buildbot__buildbot-1214 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New web plugin idea: a health indicator
This ticket is a migrated Trac ticket [2966](http://trac.buildbot.net/ticket/2966)
People contributed to the original ticket: `benoit.allard@...`, @tardyp, @rutsky, @unknown_contributor, @sa2ajj, @rutsky
Ticket created on: `Oct 24 2014`
Ticket last modified on: `Mar 19 2016`
---
I like to extract as many useful indicators from my builds as possible (time, but also the number of warnings, and such ...)
It would be cool to have a web plugin that could print the evolution of my indicators over time ! (Of course, I would have to configure which indicator I want to see plotted, maybe the kind of plot, and so on ...)
---
_Comment from_: @sa2ajj
_Date_: `Oct 24 2014`
Could you please elaborate or provide a more specific example?
I think it's related to the [[metrics support|http://docs.buildbot.net/latest/developer/metrics.html]], but without an example I can easily be wrong :)
---
_Comment from_: @Ben
_Date_: `Oct 24 2014`
I was more aiming at [[Statistics|http://docs.buildbot.net/latest/developer/cls-buildsteps.html?highlight=statistics#buildbot.process.buildstep.BuildStep.hasStatistic]], but I just realized that
Note that statistics are not preserved after a build is complete.
So metrics is probably where we want to interface with the master.
I used to abuse Properties for that purpose ...
---
_Comment from_: @tardyp
_Date_: `Oct 24 2014`
Buildbot plugin system is really made for enabling such dashboards.
A web ui plugin is not technically restricted to creating a bunch of js file, it could also create a twisted service.
For me having the JS only use existing data api to query the data will be very inefficient. I think we could easily create a service, like a status service that registers to a bunch of mq events, and create statistics based on that.
I also had in mind that plugins could have some table in the DB they could use to store the data, or maybe use another db configuration with all schema + migration stuff separated.
---
_Comment from_: @tardyp
_Date_: `Oct 26 2014`
on IRC, sa2ajj talked about http://graphite.readthedocs.org/
He told us, he actually uses it at work, and has integration working with eigth.
Looking at the documentation, the first reaction is how to integrate this with multimaster, as graphite has its own db called whisper.
I haven't looked too deeply, but I think this is still feasible as an external tool. Probably this would be much cheaper than making our own metrics system inside buildbot.
An external graphite server could be set up to watch the (TBD) mq server. As there are messages for nearly every kind of activity that happens in buildbot, this is a good means of making a solid analysis of what is going on. Of course, this solution would not be fully integrated, as probably the UI would be external, but anyway, I think this is a possible cheap path.
@sa2ajj do you think it is possible? How would you estimate the cost of integration?
---
_Comment from_: @Ben
_Date_: `Oct 26 2014`
There are a lot of [[interfaces|http://graphite.readthedocs.org/en/latest/tools.html#visualization]], as well as data collectors / forwarders (same page, a bit higher) available for graphite. It looks like some of them are js-only ! Still way to go for a www-plugin !
---
_Comment from_: @sa2ajj
_Date_: `Oct 27 2014`
What I said was that we indeed use graphite, but I did not say we use it with Buildbot.
I have an oldish branch where I tried to publish metrics to graphite. However I stopped working on that when I realised that it's not very straightforward to implement it to support multi-master case.
If there's an interest, I can revive the branch or, at least, publish what I have (after rebasing the latest master) so other could comment in what direction my thinking worked.
---
_Comment from_: @unknown_contributor
_Date_: `Mar 19 2016`
+1 for this feature. I suggest merging the statistics API with the [[stats module|https://github.com/buildbot/buildbot/tree/master/master/buildbot/statistics]] developed last year by my GSOC student, and adding a default backend that stores a subset of these stats in the main database, which would enable health/stats visualization modules to be installed by default (using the Highcharts js lib, for example).
--- END ISSUE ---
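To make the service-plus-mq idea discussed above concrete, here is a minimal, framework-agnostic sketch of a statistics aggregator fed by build-finished events. It deliberately does not use Buildbot's real service or mq APIs; the event shape and the callback name are assumptions for illustration only.
```python
from collections import defaultdict


class BuildStatsAggregator:
    """Hypothetical sketch: aggregate per-builder indicators from build events."""

    def __init__(self):
        self.stats = defaultdict(
            lambda: {"builds": 0, "total_duration": 0.0, "warnings": 0}
        )

    def on_build_finished(self, event):
        # `event` is an assumed dict such as
        # {"builder": "linux", "duration": 12.3, "warnings": 4};
        # in a real deployment this callback would be registered with the mq.
        s = self.stats[event["builder"]]
        s["builds"] += 1
        s["total_duration"] += event.get("duration", 0.0)
        s["warnings"] += event.get("warnings", 0)

    def summary(self, builder):
        s = self.stats[builder]
        avg = s["total_duration"] / s["builds"] if s["builds"] else 0.0
        return {"builds": s["builds"], "avg_duration": avg, "warnings": s["warnings"]}


agg = BuildStatsAggregator()
agg.on_build_finished({"builder": "linux", "duration": 12.3, "warnings": 4})
agg.on_build_finished({"builder": "linux", "duration": 9.7, "warnings": 1})
print(agg.summary("linux"))  # {'builds': 2, 'avg_duration': 11.0, 'warnings': 5}
```
Persisting these aggregates (the "table in the DB" idea above) would then be a separate storage concern layered on top of this in-memory view.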
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pkg/setup.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # This file is part of Buildbot. Buildbot is free software: you can
4 # redistribute it and/or modify it under the terms of the GNU General Public
5 # License as published by the Free Software Foundation, version 2.
6 #
7 # This program is distributed in the hope that it will be useful, but WITHOUT
8 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
9 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
10 # details.
11 #
12 # You should have received a copy of the GNU General Public License along with
13 # this program; if not, write to the Free Software Foundation, Inc., 51
14 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
15 #
16 # Copyright Buildbot Team Members
17
18 from setuptools import setup
19 import buildbot_pkg
20
21 setup(
22 name='buildbot_pkg',
23 version=buildbot_pkg.getVersion("."),
24 description='Buildbot packaging tools',
25 author=u'Pierre Tardy',
26 author_email=u'[email protected]',
27 url='http://buildbot.net/',
28 license='GNU GPL',
29 py_modules=['buildbot_pkg'],
30 )
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pkg/setup.py b/pkg/setup.py
--- a/pkg/setup.py
+++ b/pkg/setup.py
@@ -19,7 +19,7 @@
import buildbot_pkg
setup(
- name='buildbot_pkg',
+ name='buildbot-pkg',
version=buildbot_pkg.getVersion("."),
description='Buildbot packaging tools',
author=u'Pierre Tardy',
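The patch above only renames the distribution from `buildbot_pkg` to `buildbot-pkg`. For context, package indexes already treat the two spellings as the same project because of PEP 503 name normalization, which is essentially this one-liner (shown here as a standalone sketch):
```python
import re


def normalize_dist_name(name):
    # PEP 503: runs of '-', '_' and '.' collapse to a single '-', lower-cased
    return re.sub(r"[-_.]+", "-", name).lower()


print(normalize_dist_name("buildbot_pkg"))  # buildbot-pkg
print(normalize_dist_name("buildbot-pkg"))  # buildbot-pkg
```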
|
{"golden_diff": "diff --git a/pkg/setup.py b/pkg/setup.py\n--- a/pkg/setup.py\n+++ b/pkg/setup.py\n@@ -19,7 +19,7 @@\n import buildbot_pkg\n \n setup(\n- name='buildbot_pkg',\n+ name='buildbot-pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n", "issue": "New web plugin idea: a health indicator\nThis ticket is a migrated Trac ticket [2966](http://trac.buildbot.net/ticket/2966)\n\nPeople contributed to the original ticket: `benoit.allard@...`, @tardyp, @rutsky, @unknown_contributor, @sa2ajj, @rutsky\nTicket created on: `Oct 24 2014`\nTicket last modified on: `Mar 19 2016`\n\n---\n\nI like to extract as much useful indicator from my builds as possible (time, but also amount of warnings, and such ...)\n\nIt would be cool to have a web plugin that could print the evolution of my indicators over time ! (Of course, I would have to configure which indicator I want to see plotted, maybe the kind of plot, and so on ...)\n\n\n---\n\n_Comment from_: @sa2ajj\n_Date_: `Oct 24 2014`\n\nCould you please elaborate or provide a more specific example?\n\nI think it's related to the [[metrics support|http://docs.buildbot.net/latest/developer/metrics.html]], but without an example I can easily be wrong :)\n\n---\n_Comment from_: @Ben\n_Date_: `Oct 24 2014`\n\nI was more aiming at [[Statistics|http://docs.buildbot.net/latest/developer/cls-buildsteps.html?highlight=statistics#buildbot.process.buildstep.[[BuildStep]].hasStatistic]], but I just realized that\n\n Note that statistics are not preserved after a build is complete.\n\nSo metrics is probably where we want to interface with the master.\n\nI used to abuse Properties for that purpose ...\n\n---\n_Comment from_: @tardyp\n_Date_: `Oct 24 2014`\n\nBuildbot plugin system is really made for enabling such dashboards.\n\nA web ui plugin is not technically restricted to creating a bunch of js file, it could also create a twisted service.\n\nFor me having the JS only use existing data api to query the data will be very inefficient. I think we could easily create a service, like a status service that registers to a bunch of mq events, and create statistics based on that.\n\nI also had in mind that plugins could have some table in the DB they could use to store the data, or maybe use another db configuration with all schema + migration stuff separated.\n\n\n---\n_Comment from_: @tardyp\n_Date_: `Oct 26 2014`\n\non IRC, sa2ajj talked about http://graphite.readthedocs.org/\n\nHe told us, he actually uses it at work, and has integration working with eigth.\n\nLooking at the documentation, the first reaction is how to integrate this with multimaster, as graphite has its own db called whisper.\nI haven't look too much deeply, but I think this is still feasible as a external tool. Probably this would be much cheaper than making our own metrics system inside buildbot.\n\nAn external graphite server could be setup, and watch for the (TBD) mq server. As there are messages for nearly every kind of activity that happens in buildbot, this is a good mean of making solid analysis of what is going on. Of course, this solution would not be fully integrated, as probably the UI would be external, but anyway, I think this is a possible cheap path.\n\n@sa2ajj do you think it is possible? 
How would you estimate the cost of integration?\n\n\n\n\n---\n_Comment from_: @Ben\n_Date_: `Oct 26 2014`\n\nThere are a lot of [[interfaces|http://graphite.readthedocs.org/en/latest/tools.html#visualization]], as well as data collectors / forwarders (same page, a bit higher) available for graphite. It looks like some of them are js-only ! Still way to go for a www-plugin !\n\n---\n_Comment from_: @sa2ajj\n_Date_: `Oct 27 2014`\n\nWhat I said was that we indeed use graphite, but I did not say we use it with Buildbot.\n\nI have an oldish branch where I tried to publish metrics to graphite. However I stopped working on that when I realised that it's not very straightforward to implement it to support multi-master case.\n\nIf there's an interest, I can revive the branch or, at least, publish what I have (after rebasing the latest master) so other could comment in what direction my thinking worked.\n\n---\n_Comment from_: @unknown_contributor\n_Date_: `Mar 19 2016`\n\n+1 for this feature. I suggest to merge the statistic API with the [[stats module|https://github.com/buildbot/buildbot/tree/master/master/buildbot/statistics]] developed last year by my GSOC student. And add a default backend that store a subset of these stat in the main database that would enable the development of heath/stats visualization modules directly installed by default (using Highchart js lib for example) \n\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom setuptools import setup\nimport buildbot_pkg\n\nsetup(\n name='buildbot_pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n author_email=u'[email protected]',\n url='http://buildbot.net/',\n license='GNU GPL',\n py_modules=['buildbot_pkg'],\n)\n", "path": "pkg/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom setuptools import setup\nimport buildbot_pkg\n\nsetup(\n name='buildbot-pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n author_email=u'[email protected]',\n url='http://buildbot.net/',\n license='GNU GPL',\n py_modules=['buildbot_pkg'],\n)\n", "path": "pkg/setup.py"}]}
| 1,606 | 86 |
gh_patches_debug_20490
|
rasdani/github-patches
|
git_diff
|
openvinotoolkit__datumaro-1284
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automatic generation of specs.json by running specs.py is broken
After merging https://github.com/openvinotoolkit/datumaro/pull/1268, `metadata` is newly introduced in `specs.json`, but the automated `specs.json` doesn't create `metadata`, and hence it breaks the lazy_load of importers.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/datumaro/plugins/specs.py`
Content:
```
1 # Copyright (C) 2023 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 from pathlib import Path
6
7 from datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin
8 from datumaro.util import parse_json_file
9
10 _SOURCE_PATH = Path(__file__).resolve()
11 _SOURCE_DIR = _SOURCE_PATH.parent
12 _SPECS_JSON_PATH = _SOURCE_DIR / "specs.json"
13
14
15 def get_lazy_plugins():
16 return [
17 plugin
18 for plugin in [
19 get_lazy_plugin(
20 spec["import_path"],
21 spec["plugin_name"],
22 spec["plugin_type"],
23 spec.get("extra_deps", []),
24 spec.get("metadata", {}),
25 )
26 for spec in parse_json_file(str(_SPECS_JSON_PATH))
27 ]
28 if plugin is not None
29 ]
30
31
32 if __name__ == "__main__":
33 from datumaro.components.environment import Environment
34 from datumaro.util import dump_json_file
35
36 env = Environment(use_lazy_import=False)
37 plugin_specs = []
38
39 def _enroll_to_plugin_specs(plugins, plugin_type):
40 global plugin_specs
41
42 for _, plugin in plugins.items():
43 mod = plugin.__module__
44 class_name = plugin.__name__
45 plugin_name = plugin.NAME
46 plugin_specs += [
47 {
48 "import_path": f"{mod}.{class_name}",
49 "plugin_name": plugin_name,
50 "plugin_type": plugin_type,
51 "extra_deps": get_extra_deps(plugin),
52 }
53 ]
54
55 _enroll_to_plugin_specs(env.extractors, "DatasetBase")
56 _enroll_to_plugin_specs(env.importers, "Importer")
57 _enroll_to_plugin_specs(env.launchers, "Launcher")
58 _enroll_to_plugin_specs(env.exporters, "Exporter")
59 _enroll_to_plugin_specs(env.generators, "DatasetGenerator")
60 _enroll_to_plugin_specs(env.transforms, "Transform")
61 _enroll_to_plugin_specs(env.validators, "Validator")
62
63 dump_json_file(
64 _SPECS_JSON_PATH,
65 sorted(plugin_specs, key=lambda spec: spec["import_path"]),
66 indent=True,
67 append_newline=True,
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/datumaro/plugins/specs.py b/src/datumaro/plugins/specs.py
--- a/src/datumaro/plugins/specs.py
+++ b/src/datumaro/plugins/specs.py
@@ -43,14 +43,18 @@
mod = plugin.__module__
class_name = plugin.__name__
plugin_name = plugin.NAME
- plugin_specs += [
- {
- "import_path": f"{mod}.{class_name}",
- "plugin_name": plugin_name,
- "plugin_type": plugin_type,
- "extra_deps": get_extra_deps(plugin),
- }
- ]
+ extra_deps = get_extra_deps(plugin)
+ plugin_spec = {
+ "import_path": f"{mod}.{class_name}",
+ "plugin_name": plugin_name,
+ "plugin_type": plugin_type,
+ }
+ if extra_deps:
+ plugin_spec["extra_deps"] = extra_deps
+ # Setting metadata for importers
+ if hasattr(plugin, "get_file_extensions"):
+ plugin_spec["metadata"] = {"file_extensions": sorted(plugin.get_file_extensions())}
+ plugin_specs.append(plugin_spec)
_enroll_to_plugin_specs(env.extractors, "DatasetBase")
_enroll_to_plugin_specs(env.importers, "Importer")
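With the change above, regenerating `specs.json` again attaches the `metadata` block that lazy importer loading relies on. A sketch of what one regenerated importer entry might then look like, written as a Python literal; the class path and file extension are illustrative assumptions, not values taken from the real file:
```python
plugin_spec = {
    "import_path": "datumaro.plugins.data_formats.coco.importer.CocoImporter",  # assumed path
    "plugin_name": "coco",
    "plugin_type": "Importer",
    # filled from plugin.get_file_extensions(); an empty extra_deps list is omitted
    "metadata": {"file_extensions": [".json"]},
}
```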
|
{"golden_diff": "diff --git a/src/datumaro/plugins/specs.py b/src/datumaro/plugins/specs.py\n--- a/src/datumaro/plugins/specs.py\n+++ b/src/datumaro/plugins/specs.py\n@@ -43,14 +43,18 @@\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n- plugin_specs += [\n- {\n- \"import_path\": f\"{mod}.{class_name}\",\n- \"plugin_name\": plugin_name,\n- \"plugin_type\": plugin_type,\n- \"extra_deps\": get_extra_deps(plugin),\n- }\n- ]\n+ extra_deps = get_extra_deps(plugin)\n+ plugin_spec = {\n+ \"import_path\": f\"{mod}.{class_name}\",\n+ \"plugin_name\": plugin_name,\n+ \"plugin_type\": plugin_type,\n+ }\n+ if extra_deps:\n+ plugin_spec[\"extra_deps\"] = extra_deps\n+ # Setting metadata for importers\n+ if hasattr(plugin, \"get_file_extensions\"):\n+ plugin_spec[\"metadata\"] = {\"file_extensions\": sorted(plugin.get_file_extensions())}\n+ plugin_specs.append(plugin_spec)\n \n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n", "issue": "Automatic generation of specs.json by running specs.py is broken\nAfter merging https://github.com/openvinotoolkit/datumaro/pull/1268, `metadata` is newly introduced in `specs.json`, but the automated `specs.json` doesn't create `metadata`, and hence it breaks the lazy_load of importers.\n", "before_files": [{"content": "# Copyright (C) 2023 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom pathlib import Path\n\nfrom datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin\nfrom datumaro.util import parse_json_file\n\n_SOURCE_PATH = Path(__file__).resolve()\n_SOURCE_DIR = _SOURCE_PATH.parent\n_SPECS_JSON_PATH = _SOURCE_DIR / \"specs.json\"\n\n\ndef get_lazy_plugins():\n return [\n plugin\n for plugin in [\n get_lazy_plugin(\n spec[\"import_path\"],\n spec[\"plugin_name\"],\n spec[\"plugin_type\"],\n spec.get(\"extra_deps\", []),\n spec.get(\"metadata\", {}),\n )\n for spec in parse_json_file(str(_SPECS_JSON_PATH))\n ]\n if plugin is not None\n ]\n\n\nif __name__ == \"__main__\":\n from datumaro.components.environment import Environment\n from datumaro.util import dump_json_file\n\n env = Environment(use_lazy_import=False)\n plugin_specs = []\n\n def _enroll_to_plugin_specs(plugins, plugin_type):\n global plugin_specs\n\n for _, plugin in plugins.items():\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n plugin_specs += [\n {\n \"import_path\": f\"{mod}.{class_name}\",\n \"plugin_name\": plugin_name,\n \"plugin_type\": plugin_type,\n \"extra_deps\": get_extra_deps(plugin),\n }\n ]\n\n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n _enroll_to_plugin_specs(env.launchers, \"Launcher\")\n _enroll_to_plugin_specs(env.exporters, \"Exporter\")\n _enroll_to_plugin_specs(env.generators, \"DatasetGenerator\")\n _enroll_to_plugin_specs(env.transforms, \"Transform\")\n _enroll_to_plugin_specs(env.validators, \"Validator\")\n\n dump_json_file(\n _SPECS_JSON_PATH,\n sorted(plugin_specs, key=lambda spec: spec[\"import_path\"]),\n indent=True,\n append_newline=True,\n )\n", "path": "src/datumaro/plugins/specs.py"}], "after_files": [{"content": "# Copyright (C) 2023 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom pathlib import Path\n\nfrom datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin\nfrom datumaro.util import parse_json_file\n\n_SOURCE_PATH = Path(__file__).resolve()\n_SOURCE_DIR = _SOURCE_PATH.parent\n_SPECS_JSON_PATH 
= _SOURCE_DIR / \"specs.json\"\n\n\ndef get_lazy_plugins():\n return [\n plugin\n for plugin in [\n get_lazy_plugin(\n spec[\"import_path\"],\n spec[\"plugin_name\"],\n spec[\"plugin_type\"],\n spec.get(\"extra_deps\", []),\n spec.get(\"metadata\", {}),\n )\n for spec in parse_json_file(str(_SPECS_JSON_PATH))\n ]\n if plugin is not None\n ]\n\n\nif __name__ == \"__main__\":\n from datumaro.components.environment import Environment\n from datumaro.util import dump_json_file\n\n env = Environment(use_lazy_import=False)\n plugin_specs = []\n\n def _enroll_to_plugin_specs(plugins, plugin_type):\n global plugin_specs\n\n for _, plugin in plugins.items():\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n extra_deps = get_extra_deps(plugin)\n plugin_spec = {\n \"import_path\": f\"{mod}.{class_name}\",\n \"plugin_name\": plugin_name,\n \"plugin_type\": plugin_type,\n }\n if extra_deps:\n plugin_spec[\"extra_deps\"] = extra_deps\n # Setting metadata for importers\n if hasattr(plugin, \"get_file_extensions\"):\n plugin_spec[\"metadata\"] = {\"file_extensions\": sorted(plugin.get_file_extensions())}\n plugin_specs.append(plugin_spec)\n\n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n _enroll_to_plugin_specs(env.launchers, \"Launcher\")\n _enroll_to_plugin_specs(env.exporters, \"Exporter\")\n _enroll_to_plugin_specs(env.generators, \"DatasetGenerator\")\n _enroll_to_plugin_specs(env.transforms, \"Transform\")\n _enroll_to_plugin_specs(env.validators, \"Validator\")\n\n dump_json_file(\n _SPECS_JSON_PATH,\n sorted(plugin_specs, key=lambda spec: spec[\"import_path\"]),\n indent=True,\n append_newline=True,\n )\n", "path": "src/datumaro/plugins/specs.py"}]}
| 914 | 282 |
gh_patches_debug_4520
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-357
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading
When loading ImageNet, I get the following debug messages
`Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading`
message called from https://github.com/pytorch/vision/blob/45dab0e4528333332f07ab1979e006857563cf99/torchvision/datasets/folder.py#L46
caused by
https://github.com/python-pillow/Pillow/blob/7d8c0d9e39d5ed79ea6aa590b9cbff5b0d17248c/PIL/Image.py#L573
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/folder.py`
Content:
```
1 import torch.utils.data as data
2
3 from PIL import Image
4 import os
5 import os.path
6
7 IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
8
9
10 def is_image_file(filename):
11 """Checks if a file is an image.
12
13 Args:
14 filename (string): path to a file
15
16 Returns:
17 bool: True if the filename ends with a known image extension
18 """
19 filename_lower = filename.lower()
20 return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
21
22
23 def find_classes(dir):
24 classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
25 classes.sort()
26 class_to_idx = {classes[i]: i for i in range(len(classes))}
27 return classes, class_to_idx
28
29
30 def make_dataset(dir, class_to_idx):
31 images = []
32 dir = os.path.expanduser(dir)
33 for target in sorted(os.listdir(dir)):
34 d = os.path.join(dir, target)
35 if not os.path.isdir(d):
36 continue
37
38 for root, _, fnames in sorted(os.walk(d)):
39 for fname in sorted(fnames):
40 if is_image_file(fname):
41 path = os.path.join(root, fname)
42 item = (path, class_to_idx[target])
43 images.append(item)
44
45 return images
46
47
48 def pil_loader(path):
49 # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
50 with open(path, 'rb') as f:
51 with Image.open(f) as img:
52 return img.convert('RGB')
53
54
55 def accimage_loader(path):
56 import accimage
57 try:
58 return accimage.Image(path)
59 except IOError:
60 # Potentially a decoding problem, fall back to PIL.Image
61 return pil_loader(path)
62
63
64 def default_loader(path):
65 from torchvision import get_image_backend
66 if get_image_backend() == 'accimage':
67 return accimage_loader(path)
68 else:
69 return pil_loader(path)
70
71
72 class ImageFolder(data.Dataset):
73 """A generic data loader where the images are arranged in this way: ::
74
75 root/dog/xxx.png
76 root/dog/xxy.png
77 root/dog/xxz.png
78
79 root/cat/123.png
80 root/cat/nsdf3.png
81 root/cat/asd932_.png
82
83 Args:
84 root (string): Root directory path.
85 transform (callable, optional): A function/transform that takes in an PIL image
86 and returns a transformed version. E.g, ``transforms.RandomCrop``
87 target_transform (callable, optional): A function/transform that takes in the
88 target and transforms it.
89 loader (callable, optional): A function to load an image given its path.
90
91 Attributes:
92 classes (list): List of the class names.
93 class_to_idx (dict): Dict with items (class_name, class_index).
94 imgs (list): List of (image path, class_index) tuples
95 """
96
97 def __init__(self, root, transform=None, target_transform=None,
98 loader=default_loader):
99 classes, class_to_idx = find_classes(root)
100 imgs = make_dataset(root, class_to_idx)
101 if len(imgs) == 0:
102 raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
103 "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
104
105 self.root = root
106 self.imgs = imgs
107 self.classes = classes
108 self.class_to_idx = class_to_idx
109 self.transform = transform
110 self.target_transform = target_transform
111 self.loader = loader
112
113 def __getitem__(self, index):
114 """
115 Args:
116 index (int): Index
117
118 Returns:
119 tuple: (image, target) where target is class_index of the target class.
120 """
121 path, target = self.imgs[index]
122 img = self.loader(path)
123 if self.transform is not None:
124 img = self.transform(img)
125 if self.target_transform is not None:
126 target = self.target_transform(target)
127
128 return img, target
129
130 def __len__(self):
131 return len(self.imgs)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -48,8 +48,8 @@
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
- with Image.open(f) as img:
- return img.convert('RGB')
+ img = Image.open(f)
+ return img.convert('RGB')
def accimage_loader(path):
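The patch drops the inner `with Image.open(f)` block: `convert('RGB')` returns a fully loaded copy, so closing the outer file handle afterwards is harmless, and Pillow's `Image.close()` (which, per the links in the issue, logs the "Error closing" message when its `fp` is already `None`) is no longer invoked on exit. A small self-contained check of the fixed loader might look like this (the temporary file name is arbitrary):
```python
import os
import tempfile

import numpy as np
from PIL import Image


def pil_loader(path):
    # fixed variant from the patch: no `with Image.open(...)` context manager
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')


tmp = os.path.join(tempfile.gettempdir(), 'pil_loader_check.png')
Image.fromarray(np.zeros((4, 4, 3), dtype=np.uint8)).save(tmp)
img = pil_loader(tmp)
assert img.mode == 'RGB' and img.size == (4, 4)
```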
|
{"golden_diff": "diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py\n--- a/torchvision/datasets/folder.py\n+++ b/torchvision/datasets/folder.py\n@@ -48,8 +48,8 @@\n def pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n- with Image.open(f) as img:\n- return img.convert('RGB')\n+ img = Image.open(f)\n+ return img.convert('RGB')\n \n \n def accimage_loader(path):\n", "issue": "Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading\nWhen loading ImageNet, I get the following debug messages\r\n`Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading`\r\nmessage called from https://github.com/pytorch/vision/blob/45dab0e4528333332f07ab1979e006857563cf99/torchvision/datasets/folder.py#L46\r\n\r\ncaused by\r\nhttps://github.com/python-pillow/Pillow/blob/7d8c0d9e39d5ed79ea6aa590b9cbff5b0d17248c/PIL/Image.py#L573\n", "before_files": [{"content": "import torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an image.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef make_dataset(dir, class_to_idx):\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n images.append(item)\n\n return images\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(data.Dataset):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader):\n classes, class_to_idx = find_classes(root)\n imgs = make_dataset(root, class_to_idx)\n if len(imgs) == 0:\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + root + \"\\n\"\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n self.root = root\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.transform = transform\n self.target_transform = target_transform\n self.loader = loader\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.imgs[index]\n img = self.loader(path)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n", "path": "torchvision/datasets/folder.py"}], "after_files": [{"content": "import torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an image.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef make_dataset(dir, class_to_idx):\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n images.append(item)\n\n return images\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(data.Dataset):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader):\n classes, class_to_idx = find_classes(root)\n imgs = make_dataset(root, class_to_idx)\n if len(imgs) == 0:\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + root + \"\\n\"\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n self.root = root\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.transform = transform\n self.target_transform = target_transform\n self.loader = loader\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.imgs[index]\n img = self.loader(path)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n", "path": "torchvision/datasets/folder.py"}]}
| 1,636 | 139 |
gh_patches_debug_14940
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1348
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unwrap_phase SegFaults when passed a masked array with all elements masked
The **unwrap_phase** function in the skimage.restoration namespace segfaults when passed a masked array with all elements masked. For example:
``` Python
import numpy as np
from skimage.restoration import unwrap_phase
image = np.ma.arange(100, dtype='float32').reshape(10, 10)
image[:] = np.ma.masked
unwrap_phase(image)
```
The Segmentation fault occurs in the find_pivot function in unwrap_2d_ljmu.c which is called from quicker_sort. The issue is that the routine is attempting to sort a length 0 array. A check should be added in the unwrap2D (and related functions) to see if params.no_of_edges is not 0 (or 1?)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/restoration/unwrap.py`
Content:
```
1 import numpy as np
2 import warnings
3 from six import string_types
4
5 from ._unwrap_1d import unwrap_1d
6 from ._unwrap_2d import unwrap_2d
7 from ._unwrap_3d import unwrap_3d
8
9
10 def unwrap_phase(image, wrap_around=False, seed=None):
11 '''Recover the original from a wrapped phase image.
12
13 From an image wrapped to lie in the interval [-pi, pi), recover the
14 original, unwrapped image.
15
16 Parameters
17 ----------
18 image : 1D, 2D or 3D ndarray of floats, optionally a masked array
19 The values should be in the range [-pi, pi). If a masked array is
20 provided, the masked entries will not be changed, and their values
21 will not be used to guide the unwrapping of neighboring, unmasked
22 values. Masked 1D arrays are not allowed, and will raise a
23 `ValueError`.
24 wrap_around : bool or sequence of bool, optional
25 When an element of the sequence is `True`, the unwrapping process
26 will regard the edges along the corresponding axis of the image to be
27 connected and use this connectivity to guide the phase unwrapping
28 process. If only a single boolean is given, it will apply to all axes.
29 Wrap around is not supported for 1D arrays.
30 seed : int, optional
31 Unwrapping 2D or 3D images uses random initialization. This sets the
32 seed of the PRNG to achieve deterministic behavior.
33
34 Returns
35 -------
36 image_unwrapped : array_like, double
37 Unwrapped image of the same shape as the input. If the input `image`
38 was a masked array, the mask will be preserved.
39
40 Raises
41 ------
42 ValueError
43 If called with a masked 1D array or called with a 1D array and
44 ``wrap_around=True``.
45
46 Examples
47 --------
48 >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]
49 >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))
50 >>> image_wrapped = np.angle(np.exp(1j * image))
51 >>> image_unwrapped = unwrap_phase(image_wrapped)
52 >>> np.std(image_unwrapped - image) < 1e-6 # A constant offset is normal
53 True
54
55 References
56 ----------
57 .. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
58 and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
59 algorithm based on sorting by reliability following a noncontinuous
60 path", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,
61 .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., "Fast
62 three-dimensional phase-unwrapping algorithm based on sorting by
63 reliability following a non-continuous path. In W. Osten,
64 C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,
65 International Society for Optics and Photonics.
66 '''
67 if image.ndim not in (1, 2, 3):
68 raise ValueError('Image must be 1, 2, or 3 dimensional')
69 if isinstance(wrap_around, bool):
70 wrap_around = [wrap_around] * image.ndim
71 elif (hasattr(wrap_around, '__getitem__')
72 and not isinstance(wrap_around, string_types)):
73 if len(wrap_around) != image.ndim:
74 raise ValueError('Length of `wrap_around` must equal the '
75 'dimensionality of image')
76 wrap_around = [bool(wa) for wa in wrap_around]
77 else:
78 raise ValueError('`wrap_around` must be a bool or a sequence with '
79 'length equal to the dimensionality of image')
80 if image.ndim == 1:
81 if np.ma.isMaskedArray(image):
82 raise ValueError('1D masked images cannot be unwrapped')
83 if wrap_around[0]:
84 raise ValueError('`wrap_around` is not supported for 1D images')
85 if image.ndim in (2, 3) and 1 in image.shape:
86 warnings.warn('Image has a length 1 dimension. Consider using an '
87 'array of lower dimensionality to use a more efficient '
88 'algorithm')
89
90 if np.ma.isMaskedArray(image):
91 mask = np.require(image.mask, np.uint8, ['C'])
92 image = image.data
93 else:
94 mask = np.zeros_like(image, dtype=np.uint8, order='C')
95
96 image_not_masked = np.asarray(image, dtype=np.double, order='C')
97 image_unwrapped = np.empty_like(image, dtype=np.double, order='C')
98
99 if image.ndim == 1:
100 unwrap_1d(image_not_masked, image_unwrapped)
101 elif image.ndim == 2:
102 unwrap_2d(image_not_masked, mask, image_unwrapped,
103 wrap_around, seed)
104 elif image.ndim == 3:
105 unwrap_3d(image_not_masked, mask, image_unwrapped,
106 wrap_around, seed)
107
108 if np.ma.isMaskedArray(image):
109 return np.ma.array(image_unwrapped, mask=mask)
110 else:
111 return image_unwrapped
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/restoration/unwrap.py b/skimage/restoration/unwrap.py
--- a/skimage/restoration/unwrap.py
+++ b/skimage/restoration/unwrap.py
@@ -88,13 +88,14 @@
'algorithm')
if np.ma.isMaskedArray(image):
- mask = np.require(image.mask, np.uint8, ['C'])
- image = image.data
+ mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])
else:
mask = np.zeros_like(image, dtype=np.uint8, order='C')
- image_not_masked = np.asarray(image, dtype=np.double, order='C')
- image_unwrapped = np.empty_like(image, dtype=np.double, order='C')
+ image_not_masked = np.asarray(
+ np.ma.getdata(image), dtype=np.double, order='C')
+ image_unwrapped = np.empty_like(image, dtype=np.double, order='C',
+ subok=False)
if image.ndim == 1:
unwrap_1d(image_not_masked, image_unwrapped)
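The substance of the patch is how the mask and the data are extracted from a masked input: `image.mask` can be the scalar `np.ma.nomask` rather than a full boolean array, and `np.empty_like` preserves the `MaskedArray` subclass unless `subok=False` is passed, whereas `np.ma.getmaskarray`/`np.ma.getdata` always return plain, correctly shaped ndarrays. A short sketch of those behaviours (comments show what NumPy prints for this toy case):
```python
import numpy as np

a = np.ma.arange(4.0)                  # nothing masked yet
print(a.mask)                          # False  (the scalar np.ma.nomask)
print(np.ma.getmaskarray(a))           # [False False False False]

a[:] = np.ma.masked                    # everything masked, as in the bug report
print(np.ma.getmaskarray(a))           # [ True  True  True  True]
print(type(np.empty_like(a)).__name__)               # MaskedArray
print(type(np.empty_like(a, subok=False)).__name__)  # ndarray
```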
|
{"golden_diff": "diff --git a/skimage/restoration/unwrap.py b/skimage/restoration/unwrap.py\n--- a/skimage/restoration/unwrap.py\n+++ b/skimage/restoration/unwrap.py\n@@ -88,13 +88,14 @@\n 'algorithm')\n \n if np.ma.isMaskedArray(image):\n- mask = np.require(image.mask, np.uint8, ['C'])\n- image = image.data\n+ mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])\n else:\n mask = np.zeros_like(image, dtype=np.uint8, order='C')\n \n- image_not_masked = np.asarray(image, dtype=np.double, order='C')\n- image_unwrapped = np.empty_like(image, dtype=np.double, order='C')\n+ image_not_masked = np.asarray(\n+ np.ma.getdata(image), dtype=np.double, order='C')\n+ image_unwrapped = np.empty_like(image, dtype=np.double, order='C',\n+ subok=False)\n \n if image.ndim == 1:\n unwrap_1d(image_not_masked, image_unwrapped)\n", "issue": "unwrap_phase SegFaults when passed a masked array with all elements masked\nThe **unwrap_phase** function in the skimage.restoration namespace Segmentation faults when passed a masked array with all elements masked. For example:\n\n``` Python\nimport numpy as np\nfrom skimage.restoration import unwrap_phase\nimage = np.ma.arange(100, dtype='float32').reshape(10, 10)\nimage[:] = np.ma.masked\nunwrap_phase(image)\n```\n\nThe Segmentation fault occurs in the find_pivot function in unwrap_2d_ljmu.c which is called from quicker_sort. The issue is that the routine is attempting to sort a length 0 array. A check should be added in the unwrap2D (and related functions) to see if params.no_of_edges is not 0 (or 1?)\n\n", "before_files": [{"content": "import numpy as np\nimport warnings\nfrom six import string_types\n\nfrom ._unwrap_1d import unwrap_1d\nfrom ._unwrap_2d import unwrap_2d\nfrom ._unwrap_3d import unwrap_3d\n\n\ndef unwrap_phase(image, wrap_around=False, seed=None):\n '''Recover the original from a wrapped phase image.\n\n From an image wrapped to lie in the interval [-pi, pi), recover the\n original, unwrapped image.\n\n Parameters\n ----------\n image : 1D, 2D or 3D ndarray of floats, optionally a masked array\n The values should be in the range [-pi, pi). If a masked array is\n provided, the masked entries will not be changed, and their values\n will not be used to guide the unwrapping of neighboring, unmasked\n values. Masked 1D arrays are not allowed, and will raise a\n `ValueError`.\n wrap_around : bool or sequence of bool, optional\n When an element of the sequence is `True`, the unwrapping process\n will regard the edges along the corresponding axis of the image to be\n connected and use this connectivity to guide the phase unwrapping\n process. If only a single boolean is given, it will apply to all axes.\n Wrap around is not supported for 1D arrays.\n seed : int, optional\n Unwrapping 2D or 3D images uses random initialization. This sets the\n seed of the PRNG to achieve deterministic behavior.\n\n Returns\n -------\n image_unwrapped : array_like, double\n Unwrapped image of the same shape as the input. If the input `image`\n was a masked array, the mask will be preserved.\n\n Raises\n ------\n ValueError\n If called with a masked 1D array or called with a 1D array and\n ``wrap_around=True``.\n\n Examples\n --------\n >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]\n >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))\n >>> image_wrapped = np.angle(np.exp(1j * image))\n >>> image_unwrapped = unwrap_phase(image_wrapped)\n >>> np.std(image_unwrapped - image) < 1e-6 # A constant offset is normal\n True\n\n References\n ----------\n .. 
[1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,\n and Munther A. Gdeisat, \"Fast two-dimensional phase-unwrapping\n algorithm based on sorting by reliability following a noncontinuous\n path\", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,\n .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., \"Fast\n three-dimensional phase-unwrapping algorithm based on sorting by\n reliability following a non-continuous path. In W. Osten,\n C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,\n International Society for Optics and Photonics.\n '''\n if image.ndim not in (1, 2, 3):\n raise ValueError('Image must be 1, 2, or 3 dimensional')\n if isinstance(wrap_around, bool):\n wrap_around = [wrap_around] * image.ndim\n elif (hasattr(wrap_around, '__getitem__')\n and not isinstance(wrap_around, string_types)):\n if len(wrap_around) != image.ndim:\n raise ValueError('Length of `wrap_around` must equal the '\n 'dimensionality of image')\n wrap_around = [bool(wa) for wa in wrap_around]\n else:\n raise ValueError('`wrap_around` must be a bool or a sequence with '\n 'length equal to the dimensionality of image')\n if image.ndim == 1:\n if np.ma.isMaskedArray(image):\n raise ValueError('1D masked images cannot be unwrapped')\n if wrap_around[0]:\n raise ValueError('`wrap_around` is not supported for 1D images')\n if image.ndim in (2, 3) and 1 in image.shape:\n warnings.warn('Image has a length 1 dimension. Consider using an '\n 'array of lower dimensionality to use a more efficient '\n 'algorithm')\n\n if np.ma.isMaskedArray(image):\n mask = np.require(image.mask, np.uint8, ['C'])\n image = image.data\n else:\n mask = np.zeros_like(image, dtype=np.uint8, order='C')\n\n image_not_masked = np.asarray(image, dtype=np.double, order='C')\n image_unwrapped = np.empty_like(image, dtype=np.double, order='C')\n\n if image.ndim == 1:\n unwrap_1d(image_not_masked, image_unwrapped)\n elif image.ndim == 2:\n unwrap_2d(image_not_masked, mask, image_unwrapped,\n wrap_around, seed)\n elif image.ndim == 3:\n unwrap_3d(image_not_masked, mask, image_unwrapped,\n wrap_around, seed)\n\n if np.ma.isMaskedArray(image):\n return np.ma.array(image_unwrapped, mask=mask)\n else:\n return image_unwrapped\n", "path": "skimage/restoration/unwrap.py"}], "after_files": [{"content": "import numpy as np\nimport warnings\nfrom six import string_types\n\nfrom ._unwrap_1d import unwrap_1d\nfrom ._unwrap_2d import unwrap_2d\nfrom ._unwrap_3d import unwrap_3d\n\n\ndef unwrap_phase(image, wrap_around=False, seed=None):\n '''Recover the original from a wrapped phase image.\n\n From an image wrapped to lie in the interval [-pi, pi), recover the\n original, unwrapped image.\n\n Parameters\n ----------\n image : 1D, 2D or 3D ndarray of floats, optionally a masked array\n The values should be in the range [-pi, pi). If a masked array is\n provided, the masked entries will not be changed, and their values\n will not be used to guide the unwrapping of neighboring, unmasked\n values. Masked 1D arrays are not allowed, and will raise a\n `ValueError`.\n wrap_around : bool or sequence of bool, optional\n When an element of the sequence is `True`, the unwrapping process\n will regard the edges along the corresponding axis of the image to be\n connected and use this connectivity to guide the phase unwrapping\n process. If only a single boolean is given, it will apply to all axes.\n Wrap around is not supported for 1D arrays.\n seed : int, optional\n Unwrapping 2D or 3D images uses random initialization. 
This sets the\n seed of the PRNG to achieve deterministic behavior.\n\n Returns\n -------\n image_unwrapped : array_like, double\n Unwrapped image of the same shape as the input. If the input `image`\n was a masked array, the mask will be preserved.\n\n Raises\n ------\n ValueError\n If called with a masked 1D array or called with a 1D array and\n ``wrap_around=True``.\n\n Examples\n --------\n >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]\n >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))\n >>> image_wrapped = np.angle(np.exp(1j * image))\n >>> image_unwrapped = unwrap_phase(image_wrapped)\n >>> np.std(image_unwrapped - image) < 1e-6 # A constant offset is normal\n True\n\n References\n ----------\n .. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,\n and Munther A. Gdeisat, \"Fast two-dimensional phase-unwrapping\n algorithm based on sorting by reliability following a noncontinuous\n path\", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,\n .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., \"Fast\n three-dimensional phase-unwrapping algorithm based on sorting by\n reliability following a non-continuous path. In W. Osten,\n C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,\n International Society for Optics and Photonics.\n '''\n if image.ndim not in (1, 2, 3):\n raise ValueError('Image must be 1, 2, or 3 dimensional')\n if isinstance(wrap_around, bool):\n wrap_around = [wrap_around] * image.ndim\n elif (hasattr(wrap_around, '__getitem__')\n and not isinstance(wrap_around, string_types)):\n if len(wrap_around) != image.ndim:\n raise ValueError('Length of `wrap_around` must equal the '\n 'dimensionality of image')\n wrap_around = [bool(wa) for wa in wrap_around]\n else:\n raise ValueError('`wrap_around` must be a bool or a sequence with '\n 'length equal to the dimensionality of image')\n if image.ndim == 1:\n if np.ma.isMaskedArray(image):\n raise ValueError('1D masked images cannot be unwrapped')\n if wrap_around[0]:\n raise ValueError('`wrap_around` is not supported for 1D images')\n if image.ndim in (2, 3) and 1 in image.shape:\n warnings.warn('Image has a length 1 dimension. Consider using an '\n 'array of lower dimensionality to use a more efficient '\n 'algorithm')\n\n if np.ma.isMaskedArray(image):\n mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])\n else:\n mask = np.zeros_like(image, dtype=np.uint8, order='C')\n\n image_not_masked = np.asarray(\n np.ma.getdata(image), dtype=np.double, order='C')\n image_unwrapped = np.empty_like(image, dtype=np.double, order='C',\n subok=False)\n\n if image.ndim == 1:\n unwrap_1d(image_not_masked, image_unwrapped)\n elif image.ndim == 2:\n unwrap_2d(image_not_masked, mask, image_unwrapped,\n wrap_around, seed)\n elif image.ndim == 3:\n unwrap_3d(image_not_masked, mask, image_unwrapped,\n wrap_around, seed)\n\n if np.ma.isMaskedArray(image):\n return np.ma.array(image_unwrapped, mask=mask)\n else:\n return image_unwrapped\n", "path": "skimage/restoration/unwrap.py"}]}
| 1,881 | 249 |
gh_patches_debug_50539
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-291
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Chinese document layout has a link error.
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
A clear and concise description of what the bug is.
Chinese document layout has a link error.
doc link:[https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/norm_zh.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """
18 This file folds Chinese po files by hacking babel.messages.pofile.normalize
19 using jieba text segment library instead of regex
20 """
21
22 import datetime
23 import os
24
25 from babel.messages import pofile
26 from babel.messages.pofile import escape
27
28
29 def _zh_len(s):
30 """
31 Calculate text length in Chinese
32 """
33 try:
34 return len(s.encode('gb2312'))
35 except ValueError:
36 return len(s)
37
38
39 def _zh_split(s):
40 """
41 Split text length in Chinese
42 """
43 import jieba
44 try:
45 s.encode('ascii')
46 has_zh = False
47 except ValueError:
48 has_zh = True
49
50 if has_zh:
51 return list(jieba.cut(s))
52 else:
53 return pofile.WORD_SEP.split(s)
54
55
56 # code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)
57 def _normalize(string, prefix='', width=76):
58 r"""Convert a string into a format that is appropriate for .po files.
59 >>> print(normalize('''Say:
60 ... "hello, world!"
61 ... ''', width=None))
62 ""
63 "Say:\n"
64 " \"hello, world!\"\n"
65 >>> print(normalize('''Say:
66 ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
67 ... ''', width=32))
68 ""
69 "Say:\n"
70 " \"Lorem ipsum dolor sit "
71 "amet, consectetur adipisicing"
72 " elit, \"\n"
73 :param string: the string to normalize
74 :param prefix: a string that should be prepended to every line
75 :param width: the maximum line width; use `None`, 0, or a negative number
76 to completely disable line wrapping
77 """
78
79 if width and width > 0:
80 prefixlen = _zh_len(prefix)
81 lines = []
82 for line in string.splitlines(True):
83 if _zh_len(escape(line)) + prefixlen > width:
84 chunks = _zh_split(line)
85 chunks.reverse()
86 while chunks:
87 buf = []
88 size = 2
89 while chunks:
90 l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741
91 if size + l < width:
92 buf.append(chunks.pop())
93 size += l
94 else:
95 if not buf:
96 # handle long chunks by putting them on a
97 # separate line
98 buf.append(chunks.pop())
99 break
100 lines.append(u''.join(buf))
101 else:
102 lines.append(line)
103 else:
104 lines = string.splitlines(True)
105
106 if len(lines) <= 1:
107 return escape(string)
108
109 # Remove empty trailing line
110 if lines and not lines[-1]:
111 del lines[-1]
112 lines[-1] += '\n'
113 return u'""\n' + u'\n'.join([(prefix + escape(line)) for line in lines])
114
115
116 def main():
117 try:
118 import jieba # noqa: F401
119 except ImportError:
120 return
121
122 pofile.normalize = _normalize
123 for root, dirs, files in os.walk('.'):
124 if 'zh' not in root:
125 continue
126 for f in files:
127 if not f.endswith('.po'):
128 continue
129 path = os.path.join(root, f)
130
131 # only modify recent-changed files
132 modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
133 if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
134 continue
135
136 with open(path, 'rb') as inpf:
137 catalog = pofile.read_po(inpf)
138 with open(path, 'wb') as outf:
139 pofile.write_po(outf, catalog)
140
141
142 if __name__ == '__main__':
143 main()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py
--- a/docs/source/norm_zh.py
+++ b/docs/source/norm_zh.py
@@ -130,7 +130,7 @@
# only modify recent-changed files
modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:
continue
with open(path, 'rb') as inpf:
|
{"golden_diff": "diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py\n--- a/docs/source/norm_zh.py\n+++ b/docs/source/norm_zh.py\n@@ -130,7 +130,7 @@\n \n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:\n continue\n \n with open(path, 'rb') as inpf:\n", "issue": "[BUG] Chinese document layout has a link error.\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\nChinese document layout has a connection error.\r\ndoc link:[https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... ''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... 
''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/source/norm_zh.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... 
''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... ''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 120:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/source/norm_zh.py"}]}
| 1,783 | 136 |
gh_patches_debug_20191
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-267
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DB function to edit column names
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
The user might want to edit column names.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
We need a `db` function to allow the user to edit the name of a column. We need to check and fix foreign keys and other references to the column.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/columns.py`
Content:
```
1 from sqlalchemy import Column, Integer, ForeignKey
2 from db import constants
3
4
5 NULLABLE = "nullable"
6 PRIMARY_KEY = "primary_key"
7 TYPE = "type"
8
9 ID_TYPE = Integer
10 DEFAULT_COLUMNS = {
11 constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False}
12 }
13
14
15 class MathesarColumn(Column):
16 """
17 This class constrains the possible arguments, enabling us to include
18 a copy method (which has been deprecated in upstream SQLAlchemy since
19 1.4). The idea is that we can faithfully copy the subset of the
20 column definition that we care about, and this class defines that
21 subset.
22 """
23 def __init__(
24 self,
25 name,
26 sa_type,
27 foreign_keys=set(),
28 primary_key=False,
29 nullable=True,
30 ):
31 """
32 Construct a new ``MathesarColumn`` object.
33
34 Required arguments:
35 name -- String giving the name of the column in the database.
36 sa_type -- the SQLAlchemy type of the column.
37
38 Optional keyword arguments:
39 primary_key -- Boolean giving whether the column is a primary key.
40 """
41 super().__init__(
42 *foreign_keys,
43 name=name,
44 type_=sa_type,
45 primary_key=primary_key,
46 nullable=nullable,
47 )
48
49 @classmethod
50 def from_column(cls, column):
51 """
52 This alternate init method creates a new column (a copy) of the
53 given column. It respects only the properties in the __init__
54 of the MathesarColumn.
55 """
56 fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys}
57 return cls(
58 column.name,
59 column.type,
60 foreign_keys=fkeys,
61 primary_key=column.primary_key,
62 nullable=column.nullable,
63 )
64
65 @property
66 def is_default(self):
67 default_def = DEFAULT_COLUMNS.get(self.name, False)
68 return (
69 default_def
70 and self.type.python_type == default_def[TYPE]().python_type
71 and self.primary_key == default_def.get(PRIMARY_KEY, False)
72 and self.nullable == default_def.get(NULLABLE, True)
73 )
74
75
76 def get_default_mathesar_column_list():
77 return [
78 MathesarColumn(
79 c,
80 DEFAULT_COLUMNS[c][TYPE],
81 primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY]
82 )
83 for c in DEFAULT_COLUMNS
84 ]
85
86
87 def init_mathesar_table_column_list_with_defaults(column_list):
88 default_columns = get_default_mathesar_column_list()
89 given_columns = [MathesarColumn.from_column(c) for c in column_list]
90 return default_columns + given_columns
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/columns.py b/db/columns.py
--- a/db/columns.py
+++ b/db/columns.py
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, Integer, ForeignKey
+from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData
from db import constants
@@ -88,3 +88,19 @@
default_columns = get_default_mathesar_column_list()
given_columns = [MathesarColumn.from_column(c) for c in column_list]
return default_columns + given_columns
+
+
+def rename_column(schema, table_name, column_name, new_column_name, engine):
+ _preparer = engine.dialect.identifier_preparer
+ with engine.begin() as conn:
+ metadata = MetaData(bind=engine, schema=schema)
+ table = Table(table_name, metadata, schema=schema, autoload_with=engine)
+ column = table.columns[column_name]
+ prepared_table_name = _preparer.format_table(table)
+ prepared_column_name = _preparer.format_column(column)
+ prepared_new_column_name = _preparer.quote(new_column_name)
+ alter_stmt = f"""
+ ALTER TABLE {prepared_table_name}
+ RENAME {prepared_column_name} TO {prepared_new_column_name}
+ """
+ conn.execute(DDL(alter_stmt))
|
{"golden_diff": "diff --git a/db/columns.py b/db/columns.py\n--- a/db/columns.py\n+++ b/db/columns.py\n@@ -1,4 +1,4 @@\n-from sqlalchemy import Column, Integer, ForeignKey\n+from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData\n from db import constants\n \n \n@@ -88,3 +88,19 @@\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n+\n+\n+def rename_column(schema, table_name, column_name, new_column_name, engine):\n+ _preparer = engine.dialect.identifier_preparer\n+ with engine.begin() as conn:\n+ metadata = MetaData(bind=engine, schema=schema)\n+ table = Table(table_name, metadata, schema=schema, autoload_with=engine)\n+ column = table.columns[column_name]\n+ prepared_table_name = _preparer.format_table(table)\n+ prepared_column_name = _preparer.format_column(column)\n+ prepared_new_column_name = _preparer.quote(new_column_name)\n+ alter_stmt = f\"\"\"\n+ ALTER TABLE {prepared_table_name}\n+ RENAME {prepared_column_name} TO {prepared_new_column_name}\n+ \"\"\"\n+ conn.execute(DDL(alter_stmt))\n", "issue": "DB function to edit column names\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nThe user might want to edit column names.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nWe need a `db` function to allow the user to edit the name of a column. We need to check and fix foreign keys and other references to the column.\n", "before_files": [{"content": "from sqlalchemy import Column, Integer, ForeignKey\nfrom db import constants\n\n\nNULLABLE = \"nullable\"\nPRIMARY_KEY = \"primary_key\"\nTYPE = \"type\"\n\nID_TYPE = Integer\nDEFAULT_COLUMNS = {\n constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False}\n}\n\n\nclass MathesarColumn(Column):\n \"\"\"\n This class constrains the possible arguments, enabling us to include\n a copy method (which has been deprecated in upstream SQLAlchemy since\n 1.4). The idea is that we can faithfully copy the subset of the\n column definition that we care about, and this class defines that\n subset.\n \"\"\"\n def __init__(\n self,\n name,\n sa_type,\n foreign_keys=set(),\n primary_key=False,\n nullable=True,\n ):\n \"\"\"\n Construct a new ``MathesarColumn`` object.\n\n Required arguments:\n name -- String giving the name of the column in the database.\n sa_type -- the SQLAlchemy type of the column.\n\n Optional keyword arguments:\n primary_key -- Boolean giving whether the column is a primary key.\n \"\"\"\n super().__init__(\n *foreign_keys,\n name=name,\n type_=sa_type,\n primary_key=primary_key,\n nullable=nullable,\n )\n\n @classmethod\n def from_column(cls, column):\n \"\"\"\n This alternate init method creates a new column (a copy) of the\n given column. 
It respects only the properties in the __init__\n of the MathesarColumn.\n \"\"\"\n fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys}\n return cls(\n column.name,\n column.type,\n foreign_keys=fkeys,\n primary_key=column.primary_key,\n nullable=column.nullable,\n )\n\n @property\n def is_default(self):\n default_def = DEFAULT_COLUMNS.get(self.name, False)\n return (\n default_def\n and self.type.python_type == default_def[TYPE]().python_type\n and self.primary_key == default_def.get(PRIMARY_KEY, False)\n and self.nullable == default_def.get(NULLABLE, True)\n )\n\n\ndef get_default_mathesar_column_list():\n return [\n MathesarColumn(\n c,\n DEFAULT_COLUMNS[c][TYPE],\n primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY]\n )\n for c in DEFAULT_COLUMNS\n ]\n\n\ndef init_mathesar_table_column_list_with_defaults(column_list):\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n", "path": "db/columns.py"}], "after_files": [{"content": "from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData\nfrom db import constants\n\n\nNULLABLE = \"nullable\"\nPRIMARY_KEY = \"primary_key\"\nTYPE = \"type\"\n\nID_TYPE = Integer\nDEFAULT_COLUMNS = {\n constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False}\n}\n\n\nclass MathesarColumn(Column):\n \"\"\"\n This class constrains the possible arguments, enabling us to include\n a copy method (which has been deprecated in upstream SQLAlchemy since\n 1.4). The idea is that we can faithfully copy the subset of the\n column definition that we care about, and this class defines that\n subset.\n \"\"\"\n def __init__(\n self,\n name,\n sa_type,\n foreign_keys=set(),\n primary_key=False,\n nullable=True,\n ):\n \"\"\"\n Construct a new ``MathesarColumn`` object.\n\n Required arguments:\n name -- String giving the name of the column in the database.\n sa_type -- the SQLAlchemy type of the column.\n\n Optional keyword arguments:\n primary_key -- Boolean giving whether the column is a primary key.\n \"\"\"\n super().__init__(\n *foreign_keys,\n name=name,\n type_=sa_type,\n primary_key=primary_key,\n nullable=nullable,\n )\n\n @classmethod\n def from_column(cls, column):\n \"\"\"\n This alternate init method creates a new column (a copy) of the\n given column. 
It respects only the properties in the __init__\n of the MathesarColumn.\n \"\"\"\n fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys}\n return cls(\n column.name,\n column.type,\n foreign_keys=fkeys,\n primary_key=column.primary_key,\n nullable=column.nullable,\n )\n\n @property\n def is_default(self):\n default_def = DEFAULT_COLUMNS.get(self.name, False)\n return (\n default_def\n and self.type.python_type == default_def[TYPE]().python_type\n and self.primary_key == default_def.get(PRIMARY_KEY, False)\n and self.nullable == default_def.get(NULLABLE, True)\n )\n\n\ndef get_default_mathesar_column_list():\n return [\n MathesarColumn(\n c,\n DEFAULT_COLUMNS[c][TYPE],\n primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY]\n )\n for c in DEFAULT_COLUMNS\n ]\n\n\ndef init_mathesar_table_column_list_with_defaults(column_list):\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n\n\ndef rename_column(schema, table_name, column_name, new_column_name, engine):\n _preparer = engine.dialect.identifier_preparer\n with engine.begin() as conn:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(table_name, metadata, schema=schema, autoload_with=engine)\n column = table.columns[column_name]\n prepared_table_name = _preparer.format_table(table)\n prepared_column_name = _preparer.format_column(column)\n prepared_new_column_name = _preparer.quote(new_column_name)\n alter_stmt = f\"\"\"\n ALTER TABLE {prepared_table_name}\n RENAME {prepared_column_name} TO {prepared_new_column_name}\n \"\"\"\n conn.execute(DDL(alter_stmt))\n", "path": "db/columns.py"}]}
| 1,084 | 291 |
gh_patches_debug_5030
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1544
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: test_numerical_operations fails under pandas 1.1.0
[Failure](https://travis-ci.org/github/geopandas/geopandas/jobs/712675986#L2140) in `test_numerical_operations` (`idxmin` error type) -> pandas-dev/pandas#32749. Even though it is a known regression in pandas, we should handle it (at least skip tests for now).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/_compat.py`
Content:
```
1 from distutils.version import LooseVersion
2 import importlib
3 import os
4 import warnings
5
6 import pandas as pd
7 import shapely
8
9 # -----------------------------------------------------------------------------
10 # pandas compat
11 # -----------------------------------------------------------------------------
12
13 PANDAS_GE_025 = str(pd.__version__) >= LooseVersion("0.25.0")
14 PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("0.26.0.dev")
15 PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0.dev")
16
17
18 # -----------------------------------------------------------------------------
19 # Shapely / PyGEOS compat
20 # -----------------------------------------------------------------------------
21
22
23 SHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion("1.7.0")
24
25 HAS_PYGEOS = None
26 USE_PYGEOS = None
27 PYGEOS_SHAPELY_COMPAT = None
28
29 try:
30 import pygeos # noqa
31
32 HAS_PYGEOS = True
33 except ImportError:
34 HAS_PYGEOS = False
35
36
37 def set_use_pygeos(val=None):
38 """
39 Set the global configuration on whether to use PyGEOS or not.
40
41 The default is use PyGEOS if it is installed. This can be overridden
42 with an environment variable USE_PYGEOS (this is only checked at
43 first import, cannot be changed during interactive session).
44
45 Alternatively, pass a value here to force a True/False value.
46 """
47 global USE_PYGEOS
48 global PYGEOS_SHAPELY_COMPAT
49
50 if val is not None:
51 USE_PYGEOS = bool(val)
52 else:
53 if USE_PYGEOS is None:
54
55 USE_PYGEOS = HAS_PYGEOS
56
57 env_use_pygeos = os.getenv("USE_PYGEOS", None)
58 if env_use_pygeos is not None:
59 USE_PYGEOS = bool(int(env_use_pygeos))
60
61 # validate the pygeos version
62 if USE_PYGEOS:
63 try:
64 import pygeos # noqa
65
66 # validate the pygeos version
67 if not str(pygeos.__version__) >= LooseVersion("0.6"):
68 raise ImportError(
69 "PyGEOS >= 0.6 is required, version {0} is installed".format(
70 pygeos.__version__
71 )
72 )
73
74 # Check whether Shapely and PyGEOS use the same GEOS version.
75 # Based on PyGEOS from_shapely implementation.
76
77 from shapely.geos import geos_version_string as shapely_geos_version
78 from pygeos import geos_capi_version_string
79
80 # shapely has something like: "3.6.2-CAPI-1.10.2 4d2925d6"
81 # pygeos has something like: "3.6.2-CAPI-1.10.2"
82 if not shapely_geos_version.startswith(geos_capi_version_string):
83 warnings.warn(
84 "The Shapely GEOS version ({}) is incompatible with the GEOS "
85 "version PyGEOS was compiled with ({}). Conversions between both "
86 "will be slow.".format(
87 shapely_geos_version, geos_capi_version_string
88 )
89 )
90 PYGEOS_SHAPELY_COMPAT = False
91 else:
92 PYGEOS_SHAPELY_COMPAT = True
93
94 except ImportError:
95 raise ImportError(
96 "To use the PyGEOS speed-ups within GeoPandas, you need to install "
97 "PyGEOS: 'conda install pygeos' or 'pip install pygeos'"
98 )
99
100
101 set_use_pygeos()
102
103
104 def import_optional_dependency(name: str, extra: str = ""):
105 """
106 Import an optional dependency.
107
108 Adapted from pandas.compat._optional::import_optional_dependency
109
110 Raises a formatted ImportError if the module is not present.
111
112 Parameters
113 ----------
114 name : str
115 The module name.
116 extra : str
117 Additional text to include in the ImportError message.
118 Returns
119 -------
120 module
121 """
122 msg = """Missing optional dependency '{name}'. {extra} "
123 "Use pip or conda to install {name}.""".format(
124 name=name, extra=extra
125 )
126
127 if not isinstance(name, str):
128 raise ValueError(
129 "Invalid module name: '{name}'; must be a string".format(name=name)
130 )
131
132 try:
133 module = importlib.import_module(name)
134
135 except ImportError:
136 raise ImportError(msg) from None
137
138 return module
139
140
141 # -----------------------------------------------------------------------------
142 # RTree compat
143 # -----------------------------------------------------------------------------
144
145 HAS_RTREE = None
146 RTREE_GE_094 = False
147 try:
148 import rtree # noqa
149
150 HAS_RTREE = True
151 except ImportError:
152 HAS_RTREE = False
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/_compat.py b/geopandas/_compat.py
--- a/geopandas/_compat.py
+++ b/geopandas/_compat.py
@@ -12,7 +12,7 @@
PANDAS_GE_025 = str(pd.__version__) >= LooseVersion("0.25.0")
PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("0.26.0.dev")
-PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0.dev")
+PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0")
# -----------------------------------------------------------------------------
|
{"golden_diff": "diff --git a/geopandas/_compat.py b/geopandas/_compat.py\n--- a/geopandas/_compat.py\n+++ b/geopandas/_compat.py\n@@ -12,7 +12,7 @@\n \n PANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\n PANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\n-PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0.dev\")\n+PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0\")\n \n \n # -----------------------------------------------------------------------------\n", "issue": "BUG: test_numerical_operations fails under pandas 1.1.0\n[Failure](https://travis-ci.org/github/geopandas/geopandas/jobs/712675986#L2140) in `test_numerical_operations` (`idxmin` error type) -> pandas-dev/pandas#32749. Even though it is a known regression in pandas, we should handle it (at least skip tests for now).\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport importlib\nimport os\nimport warnings\n\nimport pandas as pd\nimport shapely\n\n# -----------------------------------------------------------------------------\n# pandas compat\n# -----------------------------------------------------------------------------\n\nPANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\nPANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\nPANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0.dev\")\n\n\n# -----------------------------------------------------------------------------\n# Shapely / PyGEOS compat\n# -----------------------------------------------------------------------------\n\n\nSHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion(\"1.7.0\")\n\nHAS_PYGEOS = None\nUSE_PYGEOS = None\nPYGEOS_SHAPELY_COMPAT = None\n\ntry:\n import pygeos # noqa\n\n HAS_PYGEOS = True\nexcept ImportError:\n HAS_PYGEOS = False\n\n\ndef set_use_pygeos(val=None):\n \"\"\"\n Set the global configuration on whether to use PyGEOS or not.\n\n The default is use PyGEOS if it is installed. This can be overridden\n with an environment variable USE_PYGEOS (this is only checked at\n first import, cannot be changed during interactive session).\n\n Alternatively, pass a value here to force a True/False value.\n \"\"\"\n global USE_PYGEOS\n global PYGEOS_SHAPELY_COMPAT\n\n if val is not None:\n USE_PYGEOS = bool(val)\n else:\n if USE_PYGEOS is None:\n\n USE_PYGEOS = HAS_PYGEOS\n\n env_use_pygeos = os.getenv(\"USE_PYGEOS\", None)\n if env_use_pygeos is not None:\n USE_PYGEOS = bool(int(env_use_pygeos))\n\n # validate the pygeos version\n if USE_PYGEOS:\n try:\n import pygeos # noqa\n\n # validate the pygeos version\n if not str(pygeos.__version__) >= LooseVersion(\"0.6\"):\n raise ImportError(\n \"PyGEOS >= 0.6 is required, version {0} is installed\".format(\n pygeos.__version__\n )\n )\n\n # Check whether Shapely and PyGEOS use the same GEOS version.\n # Based on PyGEOS from_shapely implementation.\n\n from shapely.geos import geos_version_string as shapely_geos_version\n from pygeos import geos_capi_version_string\n\n # shapely has something like: \"3.6.2-CAPI-1.10.2 4d2925d6\"\n # pygeos has something like: \"3.6.2-CAPI-1.10.2\"\n if not shapely_geos_version.startswith(geos_capi_version_string):\n warnings.warn(\n \"The Shapely GEOS version ({}) is incompatible with the GEOS \"\n \"version PyGEOS was compiled with ({}). 
Conversions between both \"\n \"will be slow.\".format(\n shapely_geos_version, geos_capi_version_string\n )\n )\n PYGEOS_SHAPELY_COMPAT = False\n else:\n PYGEOS_SHAPELY_COMPAT = True\n\n except ImportError:\n raise ImportError(\n \"To use the PyGEOS speed-ups within GeoPandas, you need to install \"\n \"PyGEOS: 'conda install pygeos' or 'pip install pygeos'\"\n )\n\n\nset_use_pygeos()\n\n\ndef import_optional_dependency(name: str, extra: str = \"\"):\n \"\"\"\n Import an optional dependency.\n\n Adapted from pandas.compat._optional::import_optional_dependency\n\n Raises a formatted ImportError if the module is not present.\n\n Parameters\n ----------\n name : str\n The module name.\n extra : str\n Additional text to include in the ImportError message.\n Returns\n -------\n module\n \"\"\"\n msg = \"\"\"Missing optional dependency '{name}'. {extra} \"\n \"Use pip or conda to install {name}.\"\"\".format(\n name=name, extra=extra\n )\n\n if not isinstance(name, str):\n raise ValueError(\n \"Invalid module name: '{name}'; must be a string\".format(name=name)\n )\n\n try:\n module = importlib.import_module(name)\n\n except ImportError:\n raise ImportError(msg) from None\n\n return module\n\n\n# -----------------------------------------------------------------------------\n# RTree compat\n# -----------------------------------------------------------------------------\n\nHAS_RTREE = None\nRTREE_GE_094 = False\ntry:\n import rtree # noqa\n\n HAS_RTREE = True\nexcept ImportError:\n HAS_RTREE = False\n", "path": "geopandas/_compat.py"}], "after_files": [{"content": "from distutils.version import LooseVersion\nimport importlib\nimport os\nimport warnings\n\nimport pandas as pd\nimport shapely\n\n# -----------------------------------------------------------------------------\n# pandas compat\n# -----------------------------------------------------------------------------\n\nPANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\nPANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\nPANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0\")\n\n\n# -----------------------------------------------------------------------------\n# Shapely / PyGEOS compat\n# -----------------------------------------------------------------------------\n\n\nSHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion(\"1.7.0\")\n\nHAS_PYGEOS = None\nUSE_PYGEOS = None\nPYGEOS_SHAPELY_COMPAT = None\n\ntry:\n import pygeos # noqa\n\n HAS_PYGEOS = True\nexcept ImportError:\n HAS_PYGEOS = False\n\n\ndef set_use_pygeos(val=None):\n \"\"\"\n Set the global configuration on whether to use PyGEOS or not.\n\n The default is use PyGEOS if it is installed. 
This can be overridden\n with an environment variable USE_PYGEOS (this is only checked at\n first import, cannot be changed during interactive session).\n\n Alternatively, pass a value here to force a True/False value.\n \"\"\"\n global USE_PYGEOS\n global PYGEOS_SHAPELY_COMPAT\n\n if val is not None:\n USE_PYGEOS = bool(val)\n else:\n if USE_PYGEOS is None:\n\n USE_PYGEOS = HAS_PYGEOS\n\n env_use_pygeos = os.getenv(\"USE_PYGEOS\", None)\n if env_use_pygeos is not None:\n USE_PYGEOS = bool(int(env_use_pygeos))\n\n # validate the pygeos version\n if USE_PYGEOS:\n try:\n import pygeos # noqa\n\n # validate the pygeos version\n if not str(pygeos.__version__) >= LooseVersion(\"0.6\"):\n raise ImportError(\n \"PyGEOS >= 0.6 is required, version {0} is installed\".format(\n pygeos.__version__\n )\n )\n\n # Check whether Shapely and PyGEOS use the same GEOS version.\n # Based on PyGEOS from_shapely implementation.\n\n from shapely.geos import geos_version_string as shapely_geos_version\n from pygeos import geos_capi_version_string\n\n # shapely has something like: \"3.6.2-CAPI-1.10.2 4d2925d6\"\n # pygeos has something like: \"3.6.2-CAPI-1.10.2\"\n if not shapely_geos_version.startswith(geos_capi_version_string):\n warnings.warn(\n \"The Shapely GEOS version ({}) is incompatible with the GEOS \"\n \"version PyGEOS was compiled with ({}). Conversions between both \"\n \"will be slow.\".format(\n shapely_geos_version, geos_capi_version_string\n )\n )\n PYGEOS_SHAPELY_COMPAT = False\n else:\n PYGEOS_SHAPELY_COMPAT = True\n\n except ImportError:\n raise ImportError(\n \"To use the PyGEOS speed-ups within GeoPandas, you need to install \"\n \"PyGEOS: 'conda install pygeos' or 'pip install pygeos'\"\n )\n\n\nset_use_pygeos()\n\n\ndef import_optional_dependency(name: str, extra: str = \"\"):\n \"\"\"\n Import an optional dependency.\n\n Adapted from pandas.compat._optional::import_optional_dependency\n\n Raises a formatted ImportError if the module is not present.\n\n Parameters\n ----------\n name : str\n The module name.\n extra : str\n Additional text to include in the ImportError message.\n Returns\n -------\n module\n \"\"\"\n msg = \"\"\"Missing optional dependency '{name}'. {extra} \"\n \"Use pip or conda to install {name}.\"\"\".format(\n name=name, extra=extra\n )\n\n if not isinstance(name, str):\n raise ValueError(\n \"Invalid module name: '{name}'; must be a string\".format(name=name)\n )\n\n try:\n module = importlib.import_module(name)\n\n except ImportError:\n raise ImportError(msg) from None\n\n return module\n\n\n# -----------------------------------------------------------------------------\n# RTree compat\n# -----------------------------------------------------------------------------\n\nHAS_RTREE = None\nRTREE_GE_094 = False\ntry:\n import rtree # noqa\n\n HAS_RTREE = True\nexcept ImportError:\n HAS_RTREE = False\n", "path": "geopandas/_compat.py"}]}
| 1,757 | 150 |
gh_patches_debug_23203
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-2339
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Server Error when editing community roles
Reported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production.
Reproduced on test AMY:
```
TypeError at /communityroles/role/25/edit/
the JSON object must be str, bytes or bytearray, not list
Error during template rendering
In template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39
{% crispy_field field %}
```
Also encountered locally when creating a community role and immediately editing it:
```
Exception Value: | 'NoneType' object is not iterable
/home/eli/amy/amy/communityroles/fields.py, line 20, in get_context
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/communityroles/fields.py`
Content:
```
1 import json
2
3 from django import forms
4 from django.http import QueryDict
5 from django.utils.datastructures import MultiValueDict
6
7
8 class CustomKeysWidget(forms.TextInput):
9 template_name = "widgets/custom_keys_widget.html"
10
11 def __init__(self, *args, **kwargs):
12 self.subwidget_form = kwargs.pop("subwidget_form", forms.TextInput)
13 super().__init__(*args, **kwargs)
14
15 def apply_labels(self, labels: list[str]) -> None:
16 self.labels = labels[:]
17
18 def get_context(self, name: str, value: str, attrs: dict):
19 value_deserialized = json.loads(value)
20 value_deserialized_dict = dict(value_deserialized)
21 default_values = dict([(label, "") for label in self.labels])
22 context_value = default_values | value_deserialized_dict
23
24 context = super().get_context(name, context_value, attrs)
25 final_attrs = context["widget"]["attrs"]
26 id_ = context["widget"]["attrs"].get("id")
27
28 subwidgets = []
29 for index, (label, value) in enumerate(context_value.items()):
30 widget_attrs = final_attrs.copy()
31 if id_:
32 widget_attrs["id"] = "{id_}_{index}".format(id_=id_, index=index)
33
34 widget = self.subwidget_form()
35 subwidget_context = widget.get_context(name, value, widget_attrs)["widget"]
36 subwidgets.append(subwidget_context | {"label": label})
37
38 context["widget"]["subwidgets"] = subwidgets
39 return context
40
41 def value_from_datadict(
42 self, data: QueryDict, files: MultiValueDict, name: str
43 ) -> list[tuple[str, str]]:
44 """Prepare structure stored in database. The structure is tied to
45 `CommunityRole.custom_keys` expected format:
46 [
47 (label1, value1),
48 (label2, value2),
49 ...
50 ]
51 """
52 try:
53 values = data.getlist(name)
54 except AttributeError:
55 values = data.get(name, [])
56 return list(zip(self.labels, values))
57
58 def value_omitted_from_data(
59 self, data: QueryDict, files: MultiValueDict, name: str
60 ) -> bool:
61 return False
62
63
64 class CustomKeysJSONField(forms.JSONField):
65 def __init__(self, **kwargs):
66 kwargs.setdefault("widget", CustomKeysWidget)
67 super().__init__(**kwargs)
68
69 def apply_labels(self, labels: list[str]) -> None:
70 self.labels = labels[:]
71 self.widget.apply_labels(self.labels)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py
--- a/amy/communityroles/fields.py
+++ b/amy/communityroles/fields.py
@@ -1,9 +1,12 @@
import json
+import logging
from django import forms
from django.http import QueryDict
from django.utils.datastructures import MultiValueDict
+logger = logging.getLogger("amy")
+
class CustomKeysWidget(forms.TextInput):
template_name = "widgets/custom_keys_widget.html"
@@ -17,7 +20,14 @@
def get_context(self, name: str, value: str, attrs: dict):
value_deserialized = json.loads(value)
- value_deserialized_dict = dict(value_deserialized)
+ try:
+ value_deserialized_dict = dict(value_deserialized)
+ except (ValueError, TypeError) as e:
+ logger.debug(
+ f"Failed to load custom key values {value_deserialized} to dict: {e}."
+ )
+ logger.debug("Proceeding without custom key values...")
+ value_deserialized_dict = {}
default_values = dict([(label, "") for label in self.labels])
context_value = default_values | value_deserialized_dict
|
{"golden_diff": "diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py\n--- a/amy/communityroles/fields.py\n+++ b/amy/communityroles/fields.py\n@@ -1,9 +1,12 @@\n import json\n+import logging\n \n from django import forms\n from django.http import QueryDict\n from django.utils.datastructures import MultiValueDict\n \n+logger = logging.getLogger(\"amy\")\n+\n \n class CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n@@ -17,7 +20,14 @@\n \n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n- value_deserialized_dict = dict(value_deserialized)\n+ try:\n+ value_deserialized_dict = dict(value_deserialized)\n+ except (ValueError, TypeError) as e:\n+ logger.debug(\n+ f\"Failed to load custom key values {value_deserialized} to dict: {e}.\"\n+ )\n+ logger.debug(\"Proceeding without custom key values...\")\n+ value_deserialized_dict = {}\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n", "issue": "Server Error when editing community roles\nReported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production.\r\n\r\nReproduced on test AMY:\r\n```\r\nTypeError at /communityroles/role/25/edit/\r\nthe JSON object must be str, bytes or bytearray, not list\r\nError during template rendering\r\nIn template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39\r\n\r\n{% crispy_field field %}\r\n```\r\n\r\nAlso encountered locally when creating a community role and immediately editing it:\r\n```\r\nException Value: | 'NoneType' object is not iterable\r\n/home/eli/amy/amy/communityroles/fields.py, line 20, in get_context\r\n\r\n```\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.http import QueryDict\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.subwidget_form = kwargs.pop(\"subwidget_form\", forms.TextInput)\n super().__init__(*args, **kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n\n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n value_deserialized_dict = dict(value_deserialized)\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n\n context = super().get_context(name, context_value, attrs)\n final_attrs = context[\"widget\"][\"attrs\"]\n id_ = context[\"widget\"][\"attrs\"].get(\"id\")\n\n subwidgets = []\n for index, (label, value) in enumerate(context_value.items()):\n widget_attrs = final_attrs.copy()\n if id_:\n widget_attrs[\"id\"] = \"{id_}_{index}\".format(id_=id_, index=index)\n\n widget = self.subwidget_form()\n subwidget_context = widget.get_context(name, value, widget_attrs)[\"widget\"]\n subwidgets.append(subwidget_context | {\"label\": label})\n\n context[\"widget\"][\"subwidgets\"] = subwidgets\n return context\n\n def value_from_datadict(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> list[tuple[str, str]]:\n \"\"\"Prepare structure stored in database. 
The structure is tied to\n `CommunityRole.custom_keys` expected format:\n [\n (label1, value1),\n (label2, value2),\n ...\n ]\n \"\"\"\n try:\n values = data.getlist(name)\n except AttributeError:\n values = data.get(name, [])\n return list(zip(self.labels, values))\n\n def value_omitted_from_data(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> bool:\n return False\n\n\nclass CustomKeysJSONField(forms.JSONField):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"widget\", CustomKeysWidget)\n super().__init__(**kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n self.widget.apply_labels(self.labels)\n", "path": "amy/communityroles/fields.py"}], "after_files": [{"content": "import json\nimport logging\n\nfrom django import forms\nfrom django.http import QueryDict\nfrom django.utils.datastructures import MultiValueDict\n\nlogger = logging.getLogger(\"amy\")\n\n\nclass CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.subwidget_form = kwargs.pop(\"subwidget_form\", forms.TextInput)\n super().__init__(*args, **kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n\n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n try:\n value_deserialized_dict = dict(value_deserialized)\n except (ValueError, TypeError) as e:\n logger.debug(\n f\"Failed to load custom key values {value_deserialized} to dict: {e}.\"\n )\n logger.debug(\"Proceeding without custom key values...\")\n value_deserialized_dict = {}\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n\n context = super().get_context(name, context_value, attrs)\n final_attrs = context[\"widget\"][\"attrs\"]\n id_ = context[\"widget\"][\"attrs\"].get(\"id\")\n\n subwidgets = []\n for index, (label, value) in enumerate(context_value.items()):\n widget_attrs = final_attrs.copy()\n if id_:\n widget_attrs[\"id\"] = \"{id_}_{index}\".format(id_=id_, index=index)\n\n widget = self.subwidget_form()\n subwidget_context = widget.get_context(name, value, widget_attrs)[\"widget\"]\n subwidgets.append(subwidget_context | {\"label\": label})\n\n context[\"widget\"][\"subwidgets\"] = subwidgets\n return context\n\n def value_from_datadict(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> list[tuple[str, str]]:\n \"\"\"Prepare structure stored in database. The structure is tied to\n `CommunityRole.custom_keys` expected format:\n [\n (label1, value1),\n (label2, value2),\n ...\n ]\n \"\"\"\n try:\n values = data.getlist(name)\n except AttributeError:\n values = data.get(name, [])\n return list(zip(self.labels, values))\n\n def value_omitted_from_data(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> bool:\n return False\n\n\nclass CustomKeysJSONField(forms.JSONField):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"widget\", CustomKeysWidget)\n super().__init__(**kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n self.widget.apply_labels(self.labels)\n", "path": "amy/communityroles/fields.py"}]}
| 1,115 | 266 |
gh_patches_debug_21536
|
rasdani/github-patches
|
git_diff
|
pymeasure__pymeasure-81
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider installing the tests
Since you decided to embed the test suite inside the package instead of leaving it outside in a common `tests` folder (both of which are totally acceptable and covered by the `pytest` guidelines), why not install the tests with the package? It would give the following benefits:
- Simplify the `setuptools` metadata, by replacing the explicit listing of the modules with a single call to `find_package`,
- Easy testing of the installed package via `python -m pytest --pyargs pymeasure`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2017 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25
26 from setuptools import setup
27
28 setup(
29 name='PyMeasure',
30 version='0.4.3',
31 author='PyMeasure Developers',
32 packages=[
33 'pymeasure', 'pymeasure.instruments',
34 'pymeasure.adapters', 'pymeasure.display',
35 'pymeasure.experiment',
36 'pymeasure.instruments.agilent',
37 'pymeasure.instruments.ami',
38 'pymeasure.instruments.anritsu',
39 'pymeasure.instruments.danfysik',
40 'pymeasure.instruments.fwbell',
41 'pymeasure.instruments.hp',
42 'pymeasure.instruments.keithley',
43 'pymeasure.instruments.lakeshore',
44 'pymeasure.instruments.newport',
45 'pymeasure.instruments.parker',
46 'pymeasure.instruments.signalrecovery',
47 'pymeasure.instruments.srs',
48 'pymeasure.instruments.thorlabs',
49 'pymeasure.instruments.tektronix',
50 'pymeasure.instruments.yokogawa',
51 ],
52 scripts=[],
53 url='https://github.com/ralph-group/pymeasure',
54 download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',
55 license='MIT License',
56 description='Scientific measurement library for instruments, experiments, and live-plotting',
57 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
58 install_requires=[
59 "Numpy >= 1.6.1",
60 "pandas >= 0.14",
61 "pyvisa >= 1.8",
62 "pyserial >= 2.7",
63 "pyqtgraph >= 0.9.10"
64 ],
65 classifiers=[
66 "Development Status :: 4 - Beta",
67 "Intended Audience :: Science/Research",
68 "License :: OSI Approved :: MIT License",
69 "Operating System :: MacOS",
70 "Operating System :: Microsoft :: Windows",
71 "Operating System :: POSIX",
72 "Operating System :: Unix",
73 "Programming Language :: Python :: 3 :: Only",
74 "Topic :: Scientific/Engineering",
75 ],
76 keywords="measure instrument experiment control automate graph plot"
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,32 +23,13 @@
#
-from setuptools import setup
+from setuptools import setup, find_packages
setup(
name='PyMeasure',
version='0.4.3',
author='PyMeasure Developers',
- packages=[
- 'pymeasure', 'pymeasure.instruments',
- 'pymeasure.adapters', 'pymeasure.display',
- 'pymeasure.experiment',
- 'pymeasure.instruments.agilent',
- 'pymeasure.instruments.ami',
- 'pymeasure.instruments.anritsu',
- 'pymeasure.instruments.danfysik',
- 'pymeasure.instruments.fwbell',
- 'pymeasure.instruments.hp',
- 'pymeasure.instruments.keithley',
- 'pymeasure.instruments.lakeshore',
- 'pymeasure.instruments.newport',
- 'pymeasure.instruments.parker',
- 'pymeasure.instruments.signalrecovery',
- 'pymeasure.instruments.srs',
- 'pymeasure.instruments.thorlabs',
- 'pymeasure.instruments.tektronix',
- 'pymeasure.instruments.yokogawa',
- ],
+ packages=find_packages(),
scripts=[],
url='https://github.com/ralph-group/pymeasure',
download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,32 +23,13 @@\n #\n \n \n-from setuptools import setup\n+from setuptools import setup, find_packages\n \n setup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n- packages=[\n- 'pymeasure', 'pymeasure.instruments',\n- 'pymeasure.adapters', 'pymeasure.display',\n- 'pymeasure.experiment',\n- 'pymeasure.instruments.agilent',\n- 'pymeasure.instruments.ami',\n- 'pymeasure.instruments.anritsu',\n- 'pymeasure.instruments.danfysik',\n- 'pymeasure.instruments.fwbell',\n- 'pymeasure.instruments.hp',\n- 'pymeasure.instruments.keithley',\n- 'pymeasure.instruments.lakeshore',\n- 'pymeasure.instruments.newport',\n- 'pymeasure.instruments.parker',\n- 'pymeasure.instruments.signalrecovery',\n- 'pymeasure.instruments.srs',\n- 'pymeasure.instruments.thorlabs',\n- 'pymeasure.instruments.tektronix',\n- 'pymeasure.instruments.yokogawa',\n- ],\n+ packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n", "issue": "Consider installing the tests\nSince you decided to embed the test suite inside the package instead of leaving it outside in a common `tests` folder (both of which are totally acceptable and covered by the `pytest` guidelines), why not install the tests with the package? It would give the following benefits:\r\n\r\n- Simplify the `setuptools` metadata, by replacing the explicit listing of the modules with a single call to `find_package`,\r\n- Easy testing of the installed package via `python -m pytest --pyargs pymeasure`.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2017 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup\n\nsetup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n packages=[\n 'pymeasure', 'pymeasure.instruments',\n 'pymeasure.adapters', 'pymeasure.display',\n 'pymeasure.experiment',\n 'pymeasure.instruments.agilent',\n 'pymeasure.instruments.ami',\n 'pymeasure.instruments.anritsu',\n 'pymeasure.instruments.danfysik',\n 'pymeasure.instruments.fwbell',\n 'pymeasure.instruments.hp',\n 'pymeasure.instruments.keithley',\n 'pymeasure.instruments.lakeshore',\n 'pymeasure.instruments.newport',\n 'pymeasure.instruments.parker',\n 'pymeasure.instruments.signalrecovery',\n 'pymeasure.instruments.srs',\n 'pymeasure.instruments.thorlabs',\n 'pymeasure.instruments.tektronix',\n 'pymeasure.instruments.yokogawa',\n ],\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"Numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2017 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"Numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]}
| 1,252 | 347 |
gh_patches_debug_22658
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-942
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List of Genres: alphabetization should be case-insensitive
The Lists of Feasts (this includes the list of all feasts as well as only the temporale and only the sanctorale) are ordered by alphabetic order of feast name, except that capital letters are currently taking precedence; a capital "Z" comes before a lowercase "a" and that muddles the system somewhat. Behold:
<img width="1119" alt="NewCantus temporale list" src="https://github.com/DDMAL/CantusDB/assets/83373378/2c4f7d2e-df3c-4a57-8c52-7acbfe96b099">
You can see that the alphabet thing is solid until Ad Processionem, after which the third letter becomes lowercase (Ad aquam benedictio) and the alphabet starts all over again. Maybe this is preferable, but OldCantus doesn't take capitalization into account, so I thought I'd mention it.
This is the same list on OldCantus, for reference:
<img width="1158" alt="OldCantus temporale list" src="https://github.com/DDMAL/CantusDB/assets/83373378/598ba519-a37a-478b-884f-a1ff6b07ee34">
--- END ISSUE ---
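The behaviour described above is ordinary byte-wise string comparison: every uppercase letter sorts before any lowercase letter, so a capital "Z" precedes a lowercase "a". A minimal illustration in plain Python, using the two feast names mentioned in the screenshots (this is not the CantusDB code itself):

```python
feasts = ["Ad aquam benedictio", "Ad Processionem"]

print(sorted(feasts))                 # ['Ad Processionem', 'Ad aquam benedictio']  (capital P sorts before lowercase a)
print(sorted(feasts, key=str.lower))  # ['Ad aquam benedictio', 'Ad Processionem']
```

On the Django side, wrapping the ordering field in `django.db.models.functions.Lower` gives the same case-insensitive result at the database level, which is the approach the patch for this issue takes.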
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/feast.py`
Content:
```
1 from django.views.generic import DetailView, ListView
2 from main_app.models import Feast, Source
3 from extra_views import SearchableListMixin
4
5 # this categorization is not finalized yet
6 # the feastcode on old cantus requires cleaning
7 # for now we just leave this categorization as it is
8 TEMP_PREFIX = [
9 "01",
10 "02",
11 "03",
12 "04",
13 "05",
14 "06",
15 "07",
16 "08",
17 "09",
18 "10",
19 "11",
20 "16",
21 "17",
22 ]
23 SANC_PREFIX = ["12", "13", "14", "15"]
24
25
26 class FeastDetailView(DetailView):
27 model = Feast
28 context_object_name = "feast"
29 template_name = "feast_detail.html"
30
31 def get_context_data(self, **kwargs):
32 context = super().get_context_data(**kwargs)
33
34 display_unpublished = self.request.user.is_authenticated
35
36 chants_in_feast = self.get_object().chant_set
37 if not display_unpublished:
38 chants_in_feast = chants_in_feast.filter(source__published=True)
39
40 cantus_ids = list(
41 chants_in_feast.values_list("cantus_id", flat=True).distinct()
42 )
43
44 counts = []
45 incipits = []
46 genres = []
47
48 for cantus_id in cantus_ids:
49 chants = chants_in_feast.filter(cantus_id=cantus_id)
50 count = chants.count()
51 incipit = chants.first().incipit
52 genre = chants.first().genre
53
54 counts.append(count)
55 incipits.append(incipit)
56 genres.append(genre)
57
58 zipped = zip(cantus_ids, incipits, genres, counts)
59 zipped = sorted(zipped, key=lambda t: t[3], reverse=True)
60 context["frequent_chants_zip"] = zipped
61
62 source_ids = list(
63 chants_in_feast.values_list("source__id", flat=True).distinct()
64 )
65 sources = Source.objects.filter(id__in=source_ids)
66 counts = [chants_in_feast.filter(source=source).count() for source in sources]
67
68 zipped = zip(sources, counts)
69 zipped = sorted(zipped, key=lambda t: t[1], reverse=True)
70 context["sources_zip"] = zipped
71
72 return context
73
74
75 class FeastListView(SearchableListMixin, ListView):
76 """Searchable List view for Feast model
77
78 Accessed by /feasts/
79
80 When passed a ``?q=<query>`` argument in the GET request, it will filter feasts
81 based on the fields defined in ``search_fields`` with the ``icontains`` lookup
82
83 The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`,
84 which are also passed as GET parameters
85 """
86
87 model = Feast
88 search_fields = ["name", "description", "feast_code"]
89 paginate_by = 100
90 context_object_name = "feasts"
91 template_name = "feast_list.html"
92
93 def get_ordering(self):
94 ordering = self.request.GET.get("sort_by")
95 # feasts can be ordered by name or feast_code,
96 # default to ordering by name if given anything else
97 if ordering not in ["name", "feast_code"]:
98 ordering = "name"
99 return ordering
100
101 def get_queryset(self):
102 queryset = super().get_queryset()
103 date = self.request.GET.get("date")
104 month = self.request.GET.get("month")
105 # temp vs sanc categorization is not finalized yet,
106 # the feastcode needs to be updated by the cantus people
107 if date == "temp":
108 queryset = queryset.filter(prefix__in=TEMP_PREFIX)
109 elif date == "sanc":
110 queryset = queryset.filter(prefix__in=SANC_PREFIX)
111
112 if month and (int(month)) in range(1, 13):
113 month = int(month)
114 queryset = queryset.filter(month=month)
115
116 return queryset
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/views/feast.py b/django/cantusdb_project/main_app/views/feast.py
--- a/django/cantusdb_project/main_app/views/feast.py
+++ b/django/cantusdb_project/main_app/views/feast.py
@@ -1,4 +1,5 @@
from django.views.generic import DetailView, ListView
+from django.db.models.functions import Lower
from main_app.models import Feast, Source
from extra_views import SearchableListMixin
@@ -90,13 +91,14 @@
context_object_name = "feasts"
template_name = "feast_list.html"
- def get_ordering(self):
+ def get_ordering(self) -> tuple:
ordering = self.request.GET.get("sort_by")
# feasts can be ordered by name or feast_code,
# default to ordering by name if given anything else
if ordering not in ["name", "feast_code"]:
ordering = "name"
- return ordering
+ # case insensitive ordering by name
+ return (Lower(ordering),) if ordering == "name" else (ordering,)
def get_queryset(self):
queryset = super().get_queryset()
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/feast.py b/django/cantusdb_project/main_app/views/feast.py\n--- a/django/cantusdb_project/main_app/views/feast.py\n+++ b/django/cantusdb_project/main_app/views/feast.py\n@@ -1,4 +1,5 @@\n from django.views.generic import DetailView, ListView\n+from django.db.models.functions import Lower\n from main_app.models import Feast, Source\n from extra_views import SearchableListMixin\n \n@@ -90,13 +91,14 @@\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n \n- def get_ordering(self):\n+ def get_ordering(self) -> tuple:\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n- return ordering\n+ # case insensitive ordering by name\n+ return (Lower(ordering),) if ordering == \"name\" else (ordering,)\n \n def get_queryset(self):\n queryset = super().get_queryset()\n", "issue": "List of Genres: alphabetization should be case-insensitive\nThe Lists of Feasts (this includes the list of all feasts as well as only the temporale and only the sanctorale) are ordered by alphabetic order of feast name, except that capital letters are currently taking precedence; a capital \"Z\" comes before a lowercase \"a\" and that muddles the system somewhat. Behold:\r\n\r\n<img width=\"1119\" alt=\"NewCantus temporale list\" src=\"https://github.com/DDMAL/CantusDB/assets/83373378/2c4f7d2e-df3c-4a57-8c52-7acbfe96b099\">\r\n\r\nYou can see that the alphabet thing is solid until Ad Processionem, after which the third letter becomes lowercase (Ad aquam benedictio) and the alphabet starts all over again. Maybe this is preferable, but OldCantus doesn't take capitalization into account, so I thought I'd mention it.\r\n\r\nThis is the same list on OldCantus, for reference: \r\n\r\n<img width=\"1158\" alt=\"OldCantus temporale list\" src=\"https://github.com/DDMAL/CantusDB/assets/83373378/598ba519-a37a-478b-884f-a1ff6b07ee34\">\r\n\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView\nfrom main_app.models import Feast, Source\nfrom extra_views import SearchableListMixin\n\n# this categorization is not finalized yet\n# the feastcode on old cantus requires cleaning\n# for now we just leave this categorization as it is\nTEMP_PREFIX = [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"16\",\n \"17\",\n]\nSANC_PREFIX = [\"12\", \"13\", \"14\", \"15\"]\n\n\nclass FeastDetailView(DetailView):\n model = Feast\n context_object_name = \"feast\"\n template_name = \"feast_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n display_unpublished = self.request.user.is_authenticated\n\n chants_in_feast = self.get_object().chant_set\n if not display_unpublished:\n chants_in_feast = chants_in_feast.filter(source__published=True)\n\n cantus_ids = list(\n chants_in_feast.values_list(\"cantus_id\", flat=True).distinct()\n )\n\n counts = []\n incipits = []\n genres = []\n\n for cantus_id in cantus_ids:\n chants = chants_in_feast.filter(cantus_id=cantus_id)\n count = chants.count()\n incipit = chants.first().incipit\n genre = chants.first().genre\n\n counts.append(count)\n incipits.append(incipit)\n genres.append(genre)\n\n zipped = zip(cantus_ids, incipits, genres, counts)\n zipped = sorted(zipped, key=lambda t: t[3], reverse=True)\n 
context[\"frequent_chants_zip\"] = zipped\n\n source_ids = list(\n chants_in_feast.values_list(\"source__id\", flat=True).distinct()\n )\n sources = Source.objects.filter(id__in=source_ids)\n counts = [chants_in_feast.filter(source=source).count() for source in sources]\n\n zipped = zip(sources, counts)\n zipped = sorted(zipped, key=lambda t: t[1], reverse=True)\n context[\"sources_zip\"] = zipped\n\n return context\n\n\nclass FeastListView(SearchableListMixin, ListView):\n \"\"\"Searchable List view for Feast model\n\n Accessed by /feasts/\n\n When passed a ``?q=<query>`` argument in the GET request, it will filter feasts\n based on the fields defined in ``search_fields`` with the ``icontains`` lookup\n\n The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`,\n which are also passed as GET parameters\n \"\"\"\n\n model = Feast\n search_fields = [\"name\", \"description\", \"feast_code\"]\n paginate_by = 100\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n\n def get_ordering(self):\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n return ordering\n\n def get_queryset(self):\n queryset = super().get_queryset()\n date = self.request.GET.get(\"date\")\n month = self.request.GET.get(\"month\")\n # temp vs sanc categorization is not finalized yet,\n # the feastcode needs to be updated by the cantus people\n if date == \"temp\":\n queryset = queryset.filter(prefix__in=TEMP_PREFIX)\n elif date == \"sanc\":\n queryset = queryset.filter(prefix__in=SANC_PREFIX)\n\n if month and (int(month)) in range(1, 13):\n month = int(month)\n queryset = queryset.filter(month=month)\n\n return queryset\n", "path": "django/cantusdb_project/main_app/views/feast.py"}], "after_files": [{"content": "from django.views.generic import DetailView, ListView\nfrom django.db.models.functions import Lower\nfrom main_app.models import Feast, Source\nfrom extra_views import SearchableListMixin\n\n# this categorization is not finalized yet\n# the feastcode on old cantus requires cleaning\n# for now we just leave this categorization as it is\nTEMP_PREFIX = [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"16\",\n \"17\",\n]\nSANC_PREFIX = [\"12\", \"13\", \"14\", \"15\"]\n\n\nclass FeastDetailView(DetailView):\n model = Feast\n context_object_name = \"feast\"\n template_name = \"feast_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n display_unpublished = self.request.user.is_authenticated\n\n chants_in_feast = self.get_object().chant_set\n if not display_unpublished:\n chants_in_feast = chants_in_feast.filter(source__published=True)\n\n cantus_ids = list(\n chants_in_feast.values_list(\"cantus_id\", flat=True).distinct()\n )\n\n counts = []\n incipits = []\n genres = []\n\n for cantus_id in cantus_ids:\n chants = chants_in_feast.filter(cantus_id=cantus_id)\n count = chants.count()\n incipit = chants.first().incipit\n genre = chants.first().genre\n\n counts.append(count)\n incipits.append(incipit)\n genres.append(genre)\n\n zipped = zip(cantus_ids, incipits, genres, counts)\n zipped = sorted(zipped, key=lambda t: t[3], reverse=True)\n context[\"frequent_chants_zip\"] = zipped\n\n source_ids = list(\n chants_in_feast.values_list(\"source__id\", flat=True).distinct()\n )\n 
sources = Source.objects.filter(id__in=source_ids)\n counts = [chants_in_feast.filter(source=source).count() for source in sources]\n\n zipped = zip(sources, counts)\n zipped = sorted(zipped, key=lambda t: t[1], reverse=True)\n context[\"sources_zip\"] = zipped\n\n return context\n\n\nclass FeastListView(SearchableListMixin, ListView):\n \"\"\"Searchable List view for Feast model\n\n Accessed by /feasts/\n\n When passed a ``?q=<query>`` argument in the GET request, it will filter feasts\n based on the fields defined in ``search_fields`` with the ``icontains`` lookup\n\n The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`,\n which are also passed as GET parameters\n \"\"\"\n\n model = Feast\n search_fields = [\"name\", \"description\", \"feast_code\"]\n paginate_by = 100\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n\n def get_ordering(self) -> tuple:\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n # case insensitive ordering by name\n return (Lower(ordering),) if ordering == \"name\" else (ordering,)\n\n def get_queryset(self):\n queryset = super().get_queryset()\n date = self.request.GET.get(\"date\")\n month = self.request.GET.get(\"month\")\n # temp vs sanc categorization is not finalized yet,\n # the feastcode needs to be updated by the cantus people\n if date == \"temp\":\n queryset = queryset.filter(prefix__in=TEMP_PREFIX)\n elif date == \"sanc\":\n queryset = queryset.filter(prefix__in=SANC_PREFIX)\n\n if month and (int(month)) in range(1, 13):\n month = int(month)\n queryset = queryset.filter(month=month)\n\n return queryset\n", "path": "django/cantusdb_project/main_app/views/feast.py"}]}
| 1,718 | 271 |
gh_patches_debug_5544
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-2777
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop contributor profile URL with email addresses
We should drop support for accessing contributor profiles by using user email addresses in the URLs. This feature, while not used internally in the app, leaks otherwise hidden user email addresses on sites like the web.archive.org index.
This is particularly problematic for users without any contributions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/contributors/urls.py`
Content:
```
1 from django.urls import path, register_converter
2 from django.urls.converters import StringConverter
3 from django.views.generic import RedirectView
4
5 from . import views
6
7
8 class EmailConverter(StringConverter):
9 regex = r"[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}"
10
11
12 class UsernameConverter(StringConverter):
13 regex = r"[\w.@+-]+"
14
15
16 register_converter(EmailConverter, "email")
17 register_converter(UsernameConverter, "username")
18
19 urlpatterns = [
20 # Legacy: Redirect to /contributors/email
21 path(
22 "contributor/<email:email>/",
23 RedirectView.as_view(url="/contributors/%(email)s/", permanent=True),
24 ),
25 # List contributors
26 path(
27 "contributors/",
28 views.ContributorsView.as_view(),
29 name="pontoon.contributors",
30 ),
31 # Contributor profile by email
32 path(
33 "contributors/<email:email>/",
34 views.contributor_email,
35 name="pontoon.contributors.contributor.email",
36 ),
37 # Contributor profile by username
38 path(
39 "contributors/<username:username>/",
40 views.contributor_username,
41 name="pontoon.contributors.contributor.username",
42 ),
43 # Verify email address
44 path(
45 "verify-email-address/<str:token>/",
46 views.verify_email_address,
47 name="pontoon.contributors.verify.email",
48 ),
49 # Current user's profile
50 path("profile/", views.profile, name="pontoon.contributors.profile"),
51 # Current user's settings
52 path("settings/", views.settings, name="pontoon.contributors.settings"),
53 # Current user's notifications
54 path(
55 "notifications/",
56 views.notifications,
57 name="pontoon.contributors.notifications",
58 ),
59 # Mark current user's notifications as read
60 path(
61 "notifications/mark-all-as-read/",
62 views.mark_all_notifications_as_read,
63 name="pontoon.contributors.notifications.mark.all.as.read",
64 ),
65 # API: Toggle user profile attribute
66 path(
67 "api/v1/user/<username:username>/",
68 views.toggle_user_profile_attribute,
69 name="pontoon.contributors.toggle_user_profile_attribute",
70 ),
71 # AJAX: Save custom homepage
72 path(
73 "save-custom-homepage/",
74 views.save_custom_homepage,
75 name="pontoon.contributors.save_custom_homepage",
76 ),
77 # AJAX: Save preferred source locale
78 path(
79 "save-preferred-source-locale/",
80 views.save_preferred_source_locale,
81 name="pontoon.contributors.save_preferred_source_locale",
82 ),
83 # AJAX: Dismiss Add-On Promotion
84 path(
85 "dismiss-addon-promotion/",
86 views.dismiss_addon_promotion,
87 name="pontoon.contributors.dismiss_addon_promotion",
88 ),
89 # AJAX: Update contribution graph
90 path(
91 "update-contribution-graph/",
92 views.update_contribution_graph,
93 name="pontoon.contributors.update_contribution_graph",
94 ),
95 # AJAX: Update contribution timeline
96 path(
97 "update-contribution-timeline/",
98 views.update_contribution_timeline,
99 name="pontoon.contributors.update_contribution_timeline",
100 ),
101 ]
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pontoon/contributors/urls.py b/pontoon/contributors/urls.py
--- a/pontoon/contributors/urls.py
+++ b/pontoon/contributors/urls.py
@@ -28,12 +28,6 @@
views.ContributorsView.as_view(),
name="pontoon.contributors",
),
- # Contributor profile by email
- path(
- "contributors/<email:email>/",
- views.contributor_email,
- name="pontoon.contributors.contributor.email",
- ),
# Contributor profile by username
path(
"contributors/<username:username>/",
|
{"golden_diff": "diff --git a/pontoon/contributors/urls.py b/pontoon/contributors/urls.py\n--- a/pontoon/contributors/urls.py\n+++ b/pontoon/contributors/urls.py\n@@ -28,12 +28,6 @@\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n- # Contributor profile by email\n- path(\n- \"contributors/<email:email>/\",\n- views.contributor_email,\n- name=\"pontoon.contributors.contributor.email\",\n- ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n", "issue": "Drop contributor profile URL with email addresses\nWe should drop support for accessing contributor profiles by using user email addresses in the URLs. This feature, while not used internally in the app, leaks otherwise hidden user email addressed on sites like on web.archive.org index.\r\n\r\nThis is particularly problematic for users without any contributions.\r\n\n", "before_files": [{"content": "from django.urls import path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.views.generic import RedirectView\n\nfrom . import views\n\n\nclass EmailConverter(StringConverter):\n regex = r\"[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\"\n\n\nclass UsernameConverter(StringConverter):\n regex = r\"[\\w.@+-]+\"\n\n\nregister_converter(EmailConverter, \"email\")\nregister_converter(UsernameConverter, \"username\")\n\nurlpatterns = [\n # Legacy: Redirect to /contributors/email\n path(\n \"contributor/<email:email>/\",\n RedirectView.as_view(url=\"/contributors/%(email)s/\", permanent=True),\n ),\n # List contributors\n path(\n \"contributors/\",\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n # Contributor profile by email\n path(\n \"contributors/<email:email>/\",\n views.contributor_email,\n name=\"pontoon.contributors.contributor.email\",\n ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n views.contributor_username,\n name=\"pontoon.contributors.contributor.username\",\n ),\n # Verify email address\n path(\n \"verify-email-address/<str:token>/\",\n views.verify_email_address,\n name=\"pontoon.contributors.verify.email\",\n ),\n # Current user's profile\n path(\"profile/\", views.profile, name=\"pontoon.contributors.profile\"),\n # Current user's settings\n path(\"settings/\", views.settings, name=\"pontoon.contributors.settings\"),\n # Current user's notifications\n path(\n \"notifications/\",\n views.notifications,\n name=\"pontoon.contributors.notifications\",\n ),\n # Mark current user's notifications as read\n path(\n \"notifications/mark-all-as-read/\",\n views.mark_all_notifications_as_read,\n name=\"pontoon.contributors.notifications.mark.all.as.read\",\n ),\n # API: Toggle user profile attribute\n path(\n \"api/v1/user/<username:username>/\",\n views.toggle_user_profile_attribute,\n name=\"pontoon.contributors.toggle_user_profile_attribute\",\n ),\n # AJAX: Save custom homepage\n path(\n \"save-custom-homepage/\",\n views.save_custom_homepage,\n name=\"pontoon.contributors.save_custom_homepage\",\n ),\n # AJAX: Save preferred source locale\n path(\n \"save-preferred-source-locale/\",\n views.save_preferred_source_locale,\n name=\"pontoon.contributors.save_preferred_source_locale\",\n ),\n # AJAX: Dismiss Add-On Promotion\n path(\n \"dismiss-addon-promotion/\",\n views.dismiss_addon_promotion,\n name=\"pontoon.contributors.dismiss_addon_promotion\",\n ),\n # AJAX: Update contribution graph\n path(\n \"update-contribution-graph/\",\n views.update_contribution_graph,\n 
name=\"pontoon.contributors.update_contribution_graph\",\n ),\n # AJAX: Update contribution timeline\n path(\n \"update-contribution-timeline/\",\n views.update_contribution_timeline,\n name=\"pontoon.contributors.update_contribution_timeline\",\n ),\n]\n", "path": "pontoon/contributors/urls.py"}], "after_files": [{"content": "from django.urls import path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.views.generic import RedirectView\n\nfrom . import views\n\n\nclass EmailConverter(StringConverter):\n regex = r\"[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\"\n\n\nclass UsernameConverter(StringConverter):\n regex = r\"[\\w.@+-]+\"\n\n\nregister_converter(EmailConverter, \"email\")\nregister_converter(UsernameConverter, \"username\")\n\nurlpatterns = [\n # Legacy: Redirect to /contributors/email\n path(\n \"contributor/<email:email>/\",\n RedirectView.as_view(url=\"/contributors/%(email)s/\", permanent=True),\n ),\n # List contributors\n path(\n \"contributors/\",\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n views.contributor_username,\n name=\"pontoon.contributors.contributor.username\",\n ),\n # Verify email address\n path(\n \"verify-email-address/<str:token>/\",\n views.verify_email_address,\n name=\"pontoon.contributors.verify.email\",\n ),\n # Current user's profile\n path(\"profile/\", views.profile, name=\"pontoon.contributors.profile\"),\n # Current user's settings\n path(\"settings/\", views.settings, name=\"pontoon.contributors.settings\"),\n # Current user's notifications\n path(\n \"notifications/\",\n views.notifications,\n name=\"pontoon.contributors.notifications\",\n ),\n # Mark current user's notifications as read\n path(\n \"notifications/mark-all-as-read/\",\n views.mark_all_notifications_as_read,\n name=\"pontoon.contributors.notifications.mark.all.as.read\",\n ),\n # API: Toggle user profile attribute\n path(\n \"api/v1/user/<username:username>/\",\n views.toggle_user_profile_attribute,\n name=\"pontoon.contributors.toggle_user_profile_attribute\",\n ),\n # AJAX: Save custom homepage\n path(\n \"save-custom-homepage/\",\n views.save_custom_homepage,\n name=\"pontoon.contributors.save_custom_homepage\",\n ),\n # AJAX: Save preferred source locale\n path(\n \"save-preferred-source-locale/\",\n views.save_preferred_source_locale,\n name=\"pontoon.contributors.save_preferred_source_locale\",\n ),\n # AJAX: Dismiss Add-On Promotion\n path(\n \"dismiss-addon-promotion/\",\n views.dismiss_addon_promotion,\n name=\"pontoon.contributors.dismiss_addon_promotion\",\n ),\n # AJAX: Update contribution graph\n path(\n \"update-contribution-graph/\",\n views.update_contribution_graph,\n name=\"pontoon.contributors.update_contribution_graph\",\n ),\n # AJAX: Update contribution timeline\n path(\n \"update-contribution-timeline/\",\n views.update_contribution_timeline,\n name=\"pontoon.contributors.update_contribution_timeline\",\n ),\n]\n", "path": "pontoon/contributors/urls.py"}]}
| 1,205 | 145 |
gh_patches_debug_1910
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2699
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[email protected] still linked several places
Should be [email protected], right? ;-)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/exceptions.py`
Content:
```
1 """
2 This module contains exceptions shared across application code.
3 """
4
5 from __future__ import print_function, unicode_literals
6
7
8 class ProblemChangingUsername(Exception):
9 def __str__(self):
10 return self.msg.format(self.args[0])
11
12 class UsernameIsEmpty(ProblemChangingUsername):
13 msg = "You need to provide a username!"
14
15 class UsernameTooLong(ProblemChangingUsername):
16 msg = "The username '{}' is too long."
17
18 class UsernameContainsInvalidCharacters(ProblemChangingUsername):
19 msg = "The username '{}' contains invalid characters."
20
21 class UsernameIsRestricted(ProblemChangingUsername):
22 msg = "The username '{}' is restricted."
23
24 class UsernameAlreadyTaken(ProblemChangingUsername):
25 msg = "The username '{}' is already taken."
26
27
28 class ProblemChangingNumber(Exception):
29 def __str__(self):
30 return self.msg
31
32 class HasBigTips(ProblemChangingNumber):
33 msg = "You receive tips too large for an individual. Please contact [email protected]."
34
35
36 class TooGreedy(Exception): pass
37 class NoSelfTipping(Exception): pass
38 class NoTippee(Exception): pass
39 class BadAmount(Exception): pass
40 class UserDoesntAcceptTips(Exception): pass
41
42 class FailedToReserveUsername(Exception): pass
43
44 class NegativeBalance(Exception):
45 def __str__(self):
46 return "Negative balance not allowed in this context."
47
48 class NotWhitelisted(Exception): pass
49 class NoBalancedCustomerHref(Exception): pass
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/exceptions.py b/gratipay/exceptions.py
--- a/gratipay/exceptions.py
+++ b/gratipay/exceptions.py
@@ -30,7 +30,7 @@
return self.msg
class HasBigTips(ProblemChangingNumber):
- msg = "You receive tips too large for an individual. Please contact [email protected]."
+ msg = "You receive tips too large for an individual. Please contact [email protected]."
class TooGreedy(Exception): pass
|
{"golden_diff": "diff --git a/gratipay/exceptions.py b/gratipay/exceptions.py\n--- a/gratipay/exceptions.py\n+++ b/gratipay/exceptions.py\n@@ -30,7 +30,7 @@\n return self.msg\n \n class HasBigTips(ProblemChangingNumber):\n- msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n+ msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n \n \n class TooGreedy(Exception): pass\n", "issue": "[email protected] still linked several places\nShould be [email protected], right? ;-)\n\n", "before_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"The username '{}' contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\n\nclass ProblemChangingNumber(Exception):\n def __str__(self):\n return self.msg\n\nclass HasBigTips(ProblemChangingNumber):\n msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass NoTippee(Exception): pass\nclass BadAmount(Exception): pass\nclass UserDoesntAcceptTips(Exception): pass\n\nclass FailedToReserveUsername(Exception): pass\n\nclass NegativeBalance(Exception):\n def __str__(self):\n return \"Negative balance not allowed in this context.\"\n\nclass NotWhitelisted(Exception): pass\nclass NoBalancedCustomerHref(Exception): pass\n", "path": "gratipay/exceptions.py"}], "after_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"The username '{}' contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\n\nclass ProblemChangingNumber(Exception):\n def __str__(self):\n return self.msg\n\nclass HasBigTips(ProblemChangingNumber):\n msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass NoTippee(Exception): pass\nclass BadAmount(Exception): pass\nclass UserDoesntAcceptTips(Exception): pass\n\nclass FailedToReserveUsername(Exception): pass\n\nclass NegativeBalance(Exception):\n def __str__(self):\n return \"Negative balance not allowed in this context.\"\n\nclass NotWhitelisted(Exception): pass\nclass NoBalancedCustomerHref(Exception): pass\n", "path": "gratipay/exceptions.py"}]}
| 676 | 116 |
gh_patches_debug_40189
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-3423
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Memory consumption in plot_brainstorm_phantom_elekta
Circle is failing because it's using so much memory.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tutorials/plot_brainstorm_phantom_elekta.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 ==========================================
4 Brainstorm Elekta phantom tutorial dataset
5 ==========================================
6
7 Here we compute the evoked from raw for the Brainstorm Elekta phantom
8 tutorial dataset. For comparison, see [1]_ and:
9
10 http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta
11
12 References
13 ----------
14 .. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
15 Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
16 Computational Intelligence and Neuroscience, vol. 2011, Article ID
17 879716, 13 pages, 2011. doi:10.1155/2011/879716
18 """
19
20 # Authors: Eric Larson <[email protected]>
21 #
22 # License: BSD (3-clause)
23
24 import os.path as op
25 import numpy as np
26
27 import mne
28 from mne import find_events, fit_dipole
29 from mne.datasets.brainstorm import bst_phantom_elekta
30 from mne.io import read_raw_fif
31
32 print(__doc__)
33
34 ###############################################################################
35 # The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
36 # and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
37 # are read to construct instances of :class:`mne.io.Raw`.
38 data_path = bst_phantom_elekta.data_path()
39
40 raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
41 raw = read_raw_fif(raw_fname, preload=True)
42
43 ###############################################################################
44 # Data channel array consisted of 204 MEG planor gradiometers,
45 # 102 axial magnetometers, and 3 stimulus channels. Let's get the events
46 # for the phantom, where each dipole (1-32) gets its own event:
47
48 events = find_events(raw, 'STI201')
49 raw.plot(events=events)
50 raw.info['bads'] = ['MEG2421']
51
52 ###############################################################################
53 # The data have strong line frequency (60 Hz and harmonics) and cHPI coil
54 # noise (five peaks around 300 Hz):
55
56 raw.plot_psd()
57
58 ###############################################################################
59 # We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
60
61 raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')
62 raw.plot_psd()
63
64 ###############################################################################
65 # The data are still a bit noisy, so let's use Maxwell filtering to clean it.
66 # Ideally we would have the fine calibration and cross-talk information
67 # for the site of interest, but we don't, so we just do:
68
69 raw.fix_mag_coil_types()
70 raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
71 raw.plot(events=events)
72
73 ###############################################################################
74 # Now we epoch our data, average it, and look at the first dipole response.
75 # The first peak appears around 3 ms.
76
77 tmin, tmax = -0.2, 0.2
78 event_id = list(range(1, 33))
79 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))
80 epochs['1'].average().plot()
81
82 ###############################################################################
83 # Let's do some dipole fits. The phantom is properly modeled by a single-shell
84 # sphere with origin (0., 0., 0.). We compute covariance, then do the fits.
85
86 t_peak = 0.061 # 3 MS at largest peak
87 sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
88 cov = mne.compute_covariance(epochs, tmax=0)
89 data = []
90 for ii in range(1, 33):
91 evoked = epochs[str(ii)].average().crop(t_peak, t_peak)
92 data.append(evoked.data[:, 0])
93 evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
94 dip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0]
95
96 ###############################################################################
97 # Now we can compare to the actual locations, taking the difference in mm:
98
99 actual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0]
100 diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
101 print('Differences (mm):\n%s' % diffs[:, np.newaxis])
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tutorials/plot_brainstorm_phantom_elekta.py b/tutorials/plot_brainstorm_phantom_elekta.py
--- a/tutorials/plot_brainstorm_phantom_elekta.py
+++ b/tutorials/plot_brainstorm_phantom_elekta.py
@@ -38,7 +38,7 @@
data_path = bst_phantom_elekta.data_path()
raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
-raw = read_raw_fif(raw_fname, preload=True)
+raw = read_raw_fif(raw_fname)
###############################################################################
# Data channel array consisted of 204 MEG planor gradiometers,
@@ -51,39 +51,41 @@
###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
-# noise (five peaks around 300 Hz):
+# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds
+# to save memory:
-raw.plot_psd()
+raw.plot_psd(tmax=60.)
###############################################################################
-# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
-
-raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')
-raw.plot_psd()
-
-###############################################################################
-# The data are still a bit noisy, so let's use Maxwell filtering to clean it.
+# Let's use Maxwell filtering to clean the data a bit.
# Ideally we would have the fine calibration and cross-talk information
# for the site of interest, but we don't, so we just do:
raw.fix_mag_coil_types()
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
+
+###############################################################################
+# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
+
+raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')
raw.plot(events=events)
###############################################################################
# Now we epoch our data, average it, and look at the first dipole response.
-# The first peak appears around 3 ms.
+# The first peak appears around 3 ms. Because we low-passed at 40 Hz,
+# we can also decimate our data to save memory.
-tmin, tmax = -0.2, 0.2
+tmin, tmax = -0.1, 0.1
event_id = list(range(1, 33))
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),
+ decim=5)
epochs['1'].average().plot()
###############################################################################
# Let's do some dipole fits. The phantom is properly modeled by a single-shell
# sphere with origin (0., 0., 0.). We compute covariance, then do the fits.
-t_peak = 0.061 # 3 MS at largest peak
+t_peak = 60e-3 # ~60 MS at largest peak
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
cov = mne.compute_covariance(epochs, tmax=0)
data = []
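The patch above attacks memory use in three places: the raw file is opened without `preload=True` so samples stay on disk until needed, the PSD plot only reads the first 60 s, and the epochs are decimated after the 40 Hz low-pass. A rough standalone sketch of the load-lazily-then-decimate pattern; the file name is a placeholder and this is not the tutorial verbatim:

```python
import mne

raw = mne.io.read_raw_fif("phantom_raw.fif")  # lazy: no preload=True, samples stay on disk
events = mne.find_events(raw, "STI201")

raw.load_data()  # filtering needs the samples in memory
raw.filter(None, 40., h_trans_bandwidth=10., filter_length="1s")

# the signal is now band-limited to 40 Hz, so keeping every 5th sample
# (1000 Hz -> 200 Hz) is safe and shrinks the epochs five-fold
epochs = mne.Epochs(raw, events, tmin=-0.1, tmax=0.1,
                    baseline=(None, -0.01), decim=5)
```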
|
{"golden_diff": "diff --git a/tutorials/plot_brainstorm_phantom_elekta.py b/tutorials/plot_brainstorm_phantom_elekta.py\n--- a/tutorials/plot_brainstorm_phantom_elekta.py\n+++ b/tutorials/plot_brainstorm_phantom_elekta.py\n@@ -38,7 +38,7 @@\n data_path = bst_phantom_elekta.data_path()\n \n raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\n-raw = read_raw_fif(raw_fname, preload=True)\n+raw = read_raw_fif(raw_fname)\n \n ###############################################################################\n # Data channel array consisted of 204 MEG planor gradiometers,\n@@ -51,39 +51,41 @@\n \n ###############################################################################\n # The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n-# noise (five peaks around 300 Hz):\n+# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds\n+# to save memory:\n \n-raw.plot_psd()\n+raw.plot_psd(tmax=60.)\n \n ###############################################################################\n-# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n-\n-raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\n-raw.plot_psd()\n-\n-###############################################################################\n-# The data are still a bit noisy, so let's use Maxwell filtering to clean it.\n+# Let's use Maxwell filtering to clean the data a bit.\n # Ideally we would have the fine calibration and cross-talk information\n # for the site of interest, but we don't, so we just do:\n \n raw.fix_mag_coil_types()\n raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\n+\n+###############################################################################\n+# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n+\n+raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\n raw.plot(events=events)\n \n ###############################################################################\n # Now we epoch our data, average it, and look at the first dipole response.\n-# The first peak appears around 3 ms.\n+# The first peak appears around 3 ms. Because we low-passed at 40 Hz,\n+# we can also decimate our data to save memory.\n \n-tmin, tmax = -0.2, 0.2\n+tmin, tmax = -0.1, 0.1\n event_id = list(range(1, 33))\n-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))\n+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),\n+ decim=5)\n epochs['1'].average().plot()\n \n ###############################################################################\n # Let's do some dipole fits. The phantom is properly modeled by a single-shell\n # sphere with origin (0., 0., 0.). We compute covariance, then do the fits.\n \n-t_peak = 0.061 # 3 MS at largest peak\n+t_peak = 60e-3 # ~60 MS at largest peak\n sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\n cov = mne.compute_covariance(epochs, tmax=0)\n data = []\n", "issue": "Memory consumption in plot_brainstorm_phantom_elekta\nCircle is failing because it's using so much memory.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n==========================================\nBrainstorm Elekta phantom tutorial dataset\n==========================================\n\nHere we compute the evoked from raw for the Brainstorm Elekta phantom\ntutorial dataset. For comparison, see [1]_ and:\n\n http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta\n\nReferences\n----------\n.. 
[1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716\n\"\"\"\n\n# Authors: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\n\nimport mne\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)\n\n###############################################################################\n# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz\n# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data\n# are read to construct instances of :class:`mne.io.Raw`.\ndata_path = bst_phantom_elekta.data_path()\n\nraw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw = read_raw_fif(raw_fname, preload=True)\n\n###############################################################################\n# Data channel array consisted of 204 MEG planor gradiometers,\n# 102 axial magnetometers, and 3 stimulus channels. Let's get the events\n# for the phantom, where each dipole (1-32) gets its own event:\n\nevents = find_events(raw, 'STI201')\nraw.plot(events=events)\nraw.info['bads'] = ['MEG2421']\n\n###############################################################################\n# The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n# noise (five peaks around 300 Hz):\n\nraw.plot_psd()\n\n###############################################################################\n# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n\nraw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\nraw.plot_psd()\n\n###############################################################################\n# The data are still a bit noisy, so let's use Maxwell filtering to clean it.\n# Ideally we would have the fine calibration and cross-talk information\n# for the site of interest, but we don't, so we just do:\n\nraw.fix_mag_coil_types()\nraw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\nraw.plot(events=events)\n\n###############################################################################\n# Now we epoch our data, average it, and look at the first dipole response.\n# The first peak appears around 3 ms.\n\ntmin, tmax = -0.2, 0.2\nevent_id = list(range(1, 33))\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))\nepochs['1'].average().plot()\n\n###############################################################################\n# Let's do some dipole fits. The phantom is properly modeled by a single-shell\n# sphere with origin (0., 0., 0.). 
We compute covariance, then do the fits.\n\nt_peak = 0.061 # 3 MS at largest peak\nsphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\ncov = mne.compute_covariance(epochs, tmax=0)\ndata = []\nfor ii in range(1, 33):\n evoked = epochs[str(ii)].average().crop(t_peak, t_peak)\n data.append(evoked.data[:, 0])\nevoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)\ndip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0]\n\n###############################################################################\n# Now we can compare to the actual locations, taking the difference in mm:\n\nactual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0]\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nprint('Differences (mm):\\n%s' % diffs[:, np.newaxis])\n", "path": "tutorials/plot_brainstorm_phantom_elekta.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n==========================================\nBrainstorm Elekta phantom tutorial dataset\n==========================================\n\nHere we compute the evoked from raw for the Brainstorm Elekta phantom\ntutorial dataset. For comparison, see [1]_ and:\n\n http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta\n\nReferences\n----------\n.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716\n\"\"\"\n\n# Authors: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\n\nimport mne\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)\n\n###############################################################################\n# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz\n# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data\n# are read to construct instances of :class:`mne.io.Raw`.\ndata_path = bst_phantom_elekta.data_path()\n\nraw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw = read_raw_fif(raw_fname)\n\n###############################################################################\n# Data channel array consisted of 204 MEG planor gradiometers,\n# 102 axial magnetometers, and 3 stimulus channels. Let's get the events\n# for the phantom, where each dipole (1-32) gets its own event:\n\nevents = find_events(raw, 'STI201')\nraw.plot(events=events)\nraw.info['bads'] = ['MEG2421']\n\n###############################################################################\n# The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n# noise (five peaks around 300 Hz). 
Here we plot only out to 60 seconds\n# to save memory:\n\nraw.plot_psd(tmax=60.)\n\n###############################################################################\n# Let's use Maxwell filtering to clean the data a bit.\n# Ideally we would have the fine calibration and cross-talk information\n# for the site of interest, but we don't, so we just do:\n\nraw.fix_mag_coil_types()\nraw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\n\n###############################################################################\n# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n\nraw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\nraw.plot(events=events)\n\n###############################################################################\n# Now we epoch our data, average it, and look at the first dipole response.\n# The first peak appears around 3 ms. Because we low-passed at 40 Hz,\n# we can also decimate our data to save memory.\n\ntmin, tmax = -0.1, 0.1\nevent_id = list(range(1, 33))\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),\n decim=5)\nepochs['1'].average().plot()\n\n###############################################################################\n# Let's do some dipole fits. The phantom is properly modeled by a single-shell\n# sphere with origin (0., 0., 0.). We compute covariance, then do the fits.\n\nt_peak = 60e-3 # ~60 MS at largest peak\nsphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\ncov = mne.compute_covariance(epochs, tmax=0)\ndata = []\nfor ii in range(1, 33):\n evoked = epochs[str(ii)].average().crop(t_peak, t_peak)\n data.append(evoked.data[:, 0])\nevoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)\ndip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0]\n\n###############################################################################\n# Now we can compare to the actual locations, taking the difference in mm:\n\nactual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0]\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nprint('Differences (mm):\\n%s' % diffs[:, np.newaxis])\n", "path": "tutorials/plot_brainstorm_phantom_elekta.py"}]}
| 1,523 | 772 |
gh_patches_debug_13164
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1245
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JSON validation crash on missing array minItems property
```
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/pyramid/viewderivers.py", line 147, in _requestonly_view
response = view(request)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/cornice/service.py", line 491, in wrapper
response = view_()
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/core/resource/__init__.py", line 460, in put
new_record = self.process_record(post_record, old=existing)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/views/records.py", line 66, in process_record
jsonschema.validate(stripped, schema)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py", line 540, in validate
cls.check_schema(schema)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py", line 83, in check_schema
raise SchemaError.create_from(error)
jsonschema.exceptions.SchemaError: [] is too short
Failed validating 'minItems' in schema['properties']['required']:
{'items': {'type': 'string'},
'minItems': 1,
'type': 'array',
'uniqueItems': True}
On instance['required']:
[]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/records.py`
Content:
```
1 import copy
2
3 import jsonschema
4 from kinto.core import resource, utils
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import object_exists_or_404
11
12
13 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
14
15
16 @resource.register(name='record',
17 collection_path=_parent_path + '/records',
18 record_path=_parent_path + '/records/{{id}}')
19 class Record(resource.ShareableResource):
20
21 schema_field = 'schema'
22
23 def __init__(self, request, **kwargs):
24 # Before all, first check that the parent collection exists.
25 # Check if already fetched before (in batch).
26 collections = request.bound_data.setdefault('collections', {})
27 collection_uri = self.get_parent_id(request)
28 if collection_uri not in collections:
29 # Unknown yet, fetch from storage.
30 collection_parent_id = utils.instance_uri(request, 'bucket',
31 id=self.bucket_id)
32 collection = object_exists_or_404(request,
33 collection_id='collection',
34 parent_id=collection_parent_id,
35 object_id=self.collection_id)
36 collections[collection_uri] = collection
37
38 super().__init__(request, **kwargs)
39 self._collection = collections[collection_uri]
40
41 def get_parent_id(self, request):
42 self.bucket_id = request.matchdict['bucket_id']
43 self.collection_id = request.matchdict['collection_id']
44 return utils.instance_uri(request, 'collection',
45 bucket_id=self.bucket_id,
46 id=self.collection_id)
47
48 def process_record(self, new, old=None):
49 """Validate records against collection schema, if any."""
50 new = super().process_record(new, old)
51
52 schema = self._collection.get('schema')
53 settings = self.request.registry.settings
54 schema_validation = 'experimental_collection_schema_validation'
55 if not schema or not asbool(settings.get(schema_validation)):
56 return new
57
58 collection_timestamp = self._collection[self.model.modified_field]
59
60 try:
61 stripped = copy.deepcopy(new)
62 stripped.pop(self.model.id_field, None)
63 stripped.pop(self.model.modified_field, None)
64 stripped.pop(self.model.permissions_field, None)
65 stripped.pop(self.schema_field, None)
66 jsonschema.validate(stripped, schema)
67 except jsonschema_exceptions.ValidationError as e:
68 try:
69 field = e.path.pop() if e.path else e.validator_value.pop()
70 except AttributeError:
71 field = None
72 raise_invalid(self.request, name=field, description=e.message)
73
74 new[self.schema_field] = collection_timestamp
75 return new
76
77 def collection_get(self):
78 result = super().collection_get()
79 self._handle_cache_expires(self.request.response)
80 return result
81
82 def get(self):
83 result = super().get()
84 self._handle_cache_expires(self.request.response)
85 return result
86
87 def _handle_cache_expires(self, response):
88 """If the parent collection defines a ``cache_expires`` attribute,
89 then cache-control response headers are sent.
90
91 .. note::
92
93 Those headers are also sent if the
94 ``kinto.record_cache_expires_seconds`` setting is defined.
95 """
96 is_anonymous = Authenticated not in self.request.effective_principals
97 if not is_anonymous:
98 return
99
100 cache_expires = self._collection.get('cache_expires')
101 if cache_expires is None:
102 by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)
103 by_collection = '{}_{}_record_cache_expires_seconds'.format(
104 self.bucket_id, self.collection_id)
105 settings = self.request.registry.settings
106 cache_expires = settings.get(by_collection,
107 settings.get(by_bucket))
108
109 if cache_expires is not None:
110 response.cache_expires(seconds=int(cache_expires))
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -65,10 +65,10 @@
stripped.pop(self.schema_field, None)
jsonschema.validate(stripped, schema)
except jsonschema_exceptions.ValidationError as e:
- try:
- field = e.path.pop() if e.path else e.validator_value.pop()
- except AttributeError:
- field = None
+ if e.validator_value:
+ field = e.validator_value[-1]
+ else:
+ field = e.schema_path[-1]
raise_invalid(self.request, name=field, description=e.message)
new[self.schema_field] = collection_timestamp
|
{"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -65,10 +65,10 @@\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n- try:\n- field = e.path.pop() if e.path else e.validator_value.pop()\n- except AttributeError:\n- field = None\n+ if e.validator_value:\n+ field = e.validator_value[-1]\n+ else:\n+ field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n \n new[self.schema_field] = collection_timestamp\n", "issue": "JSON validation crash on missing array minItems property\n```\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/core/resource/__init__.py\", line 460, in put\r\n new_record = self.process_record(post_record, old=existing)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/views/records.py\", line 66, in process_record\r\n jsonschema.validate(stripped, schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 540, in validate\r\n cls.check_schema(schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 83, in check_schema\r\n raise SchemaError.create_from(error)\r\njsonschema.exceptions.SchemaError: [] is too short\r\n\r\nFailed validating 'minItems' in schema['properties']['required']:\r\n {'items': {'type': 'string'},\r\n 'minItems': 1,\r\n 'type': 'array',\r\n 'uniqueItems': True}\r\n\r\nOn instance['required']:\r\n []\r\n```\nJSON validation crash on missing array minItems property\n```\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/core/resource/__init__.py\", line 460, in put\r\n new_record = self.process_record(post_record, old=existing)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/views/records.py\", line 66, in process_record\r\n jsonschema.validate(stripped, schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 540, in validate\r\n cls.check_schema(schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 83, in check_schema\r\n raise SchemaError.create_from(error)\r\njsonschema.exceptions.SchemaError: [] is too short\r\n\r\nFailed validating 'minItems' in schema['properties']['required']:\r\n {'items': {'type': 'string'},\r\n 'minItems': 1,\r\n 'type': 'array',\r\n 'uniqueItems': True}\r\n\r\nOn instance['required']:\r\n []\r\n```\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import 
object_exists_or_404\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = utils.instance_uri(request, 'bucket',\n id=self.bucket_id)\n collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super().__init__(request, **kwargs)\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return utils.instance_uri(request, 'collection',\n bucket_id=self.bucket_id,\n id=self.collection_id)\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super().process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super().collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super().get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. 
note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)\n by_collection = '{}_{}_record_cache_expires_seconds'.format(\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = utils.instance_uri(request, 'bucket',\n id=self.bucket_id)\n collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super().__init__(request, **kwargs)\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return utils.instance_uri(request, 'collection',\n bucket_id=self.bucket_id,\n id=self.collection_id)\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super().process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n if e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super().collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super().get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, 
response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)\n by_collection = '{}_{}_record_cache_expires_seconds'.format(\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]}
| 1,981 | 166 |
gh_patches_debug_57
|
rasdani/github-patches
|
git_diff
|
Anselmoo__spectrafit-695
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs]: Update labeled criteria for CHANGELOG
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
Update the labeler criteria for avoiding double labels in `CHANGELOG.md`
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spectrafit/__init__.py`
Content:
```
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0a7"
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0a7"
+__version__ = "1.0.0a8"
|
{"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a7\"\n+__version__ = \"1.0.0a8\"\n", "issue": "[Docs]: Update labeled criteria for CHANGELOG\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\nUpdate the labeler criteria for avoiding double labels in `CHANGELOG.md`\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a7\"\n", "path": "spectrafit/__init__.py"}], "after_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a8\"\n", "path": "spectrafit/__init__.py"}]}
| 371 | 96 |
gh_patches_debug_27167
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3254
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable python 3.7 in CI
At least for Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/pylint_plugin.py`
Content:
```
1 """Pylint plugin for ConanFile"""
2
3 import astroid
4 from astroid import MANAGER, scoped_nodes
5
6
7 def register(linter):
8 """Declare package as plugin
9
10 This function needs to be declared so astroid treats
11 current file as a plugin.
12 """
13 pass
14
15
16 def transform_conanfile(node):
17 """Transform definition of ConanFile class so dynamic fields are visible to pylint"""
18
19 str_class = scoped_nodes.builtin_lookup("str")
20 info_class = MANAGER.ast_from_module_name("conans.model.info").lookup(
21 "ConanInfo")
22 build_requires_class = MANAGER.ast_from_module_name(
23 "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires")
24 file_copier_class = MANAGER.ast_from_module_name(
25 "conans.client.file_copier").lookup("FileCopier")
26 file_importer_class = MANAGER.ast_from_module_name(
27 "conans.client.importer").lookup("_FileImporter")
28
29 dynamic_fields = {
30 "source_folder": str_class,
31 "build_folder": str_class,
32 "package_folder": str_class,
33 "build_requires": build_requires_class,
34 "info_build": info_class,
35 "info": info_class,
36 "copy": file_copier_class,
37 "copy_deps": file_importer_class,
38 }
39
40 for f, t in dynamic_fields.items():
41 node.locals[f] = [t]
42
43
44 MANAGER.register_transform(
45 scoped_nodes.Class, transform_conanfile,
46 lambda node: node.qname() == "conans.model.conan_file.ConanFile")
47
```
Path: `.ci/jenkins/conf.py`
Content:
```
1 import argparse
2 import os
3 import platform
4 from contextlib import contextmanager
5
6 winpylocation = {"py27": "C:\\Python27\\python.exe",
7 "py34": "C:\\Python34\\python.exe",
8 "py36": "C:\\Python36\\python.exe"}
9
10 macpylocation = {"py27": "/usr/bin/python", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python",
11 "py34": "/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python",
12 "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python"}
13
14 linuxpylocation = {"py27": "/usr/bin/python2.7",
15 "py34": "/usr/bin/python3.4",
16 "py36": "/usr/bin/python3.6"}
17
18
19 def get_environ(tmp_path):
20 if platform.system() == "Windows":
21 return {"CONAN_BASH_PATH": "c:/tools/msys64/usr/bin/bash",
22 "CONAN_USER_HOME_SHORT": os.path.join(tmp_path, ".conan")}
23 return {}
24
25
26 class Extender(argparse.Action):
27 """Allows to use the same flag several times in a command and creates a list with the values.
28 For example:
29 conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true
30 It creates:
31 options = ['qt:value', 'mode:2']
32 settings = ['cucumber:true']
33 """
34 def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable
35 # Need None here incase `argparse.SUPPRESS` was supplied for `dest`
36 dest = getattr(namespace, self.dest, None)
37 if not hasattr(dest, 'extend') or dest == self.default:
38 dest = []
39 setattr(namespace, self.dest, dest)
40 # if default isn't set to None, this method might be called
41 # with the default as `values` for other arguments which
42 # share this destination.
43 parser.set_defaults(**{self.dest: None})
44
45 try:
46 dest.extend(values)
47 except ValueError:
48 dest.append(values)
49
50
51 @contextmanager
52 def environment_append(env_vars):
53 old_env = dict(os.environ)
54 for name, value in env_vars.items():
55 if isinstance(value, list):
56 env_vars[name] = os.pathsep.join(value)
57 if name in old_env:
58 env_vars[name] += os.pathsep + old_env[name]
59 os.environ.update(env_vars)
60 try:
61 yield
62 finally:
63 os.environ.clear()
64 os.environ.update(old_env)
65
66
67 @contextmanager
68 def chdir(newdir):
69 old_path = os.getcwd()
70 os.chdir(newdir)
71 try:
72 yield
73 finally:
74 os.chdir(old_path)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py
--- a/.ci/jenkins/conf.py
+++ b/.ci/jenkins/conf.py
@@ -13,7 +13,8 @@
linuxpylocation = {"py27": "/usr/bin/python2.7",
"py34": "/usr/bin/python3.4",
- "py36": "/usr/bin/python3.6"}
+ "py36": "/usr/bin/python3.6",
+ "py37": "/usr/bin/python3.7"}
def get_environ(tmp_path):
diff --git a/conans/pylint_plugin.py b/conans/pylint_plugin.py
--- a/conans/pylint_plugin.py
+++ b/conans/pylint_plugin.py
@@ -1,7 +1,6 @@
"""Pylint plugin for ConanFile"""
-
import astroid
-from astroid import MANAGER, scoped_nodes
+from astroid import MANAGER
def register(linter):
@@ -16,7 +15,7 @@
def transform_conanfile(node):
"""Transform definition of ConanFile class so dynamic fields are visible to pylint"""
- str_class = scoped_nodes.builtin_lookup("str")
+ str_class = astroid.builtin_lookup("str")
info_class = MANAGER.ast_from_module_name("conans.model.info").lookup(
"ConanInfo")
build_requires_class = MANAGER.ast_from_module_name(
@@ -42,5 +41,5 @@
MANAGER.register_transform(
- scoped_nodes.Class, transform_conanfile,
+ astroid.ClassDef, transform_conanfile,
lambda node: node.qname() == "conans.model.conan_file.ConanFile")
|
{"golden_diff": "diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py\n--- a/.ci/jenkins/conf.py\n+++ b/.ci/jenkins/conf.py\n@@ -13,7 +13,8 @@\n \n linuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": \"/usr/bin/python3.4\",\n- \"py36\": \"/usr/bin/python3.6\"}\n+ \"py36\": \"/usr/bin/python3.6\",\n+ \"py37\": \"/usr/bin/python3.7\"}\n \n \n def get_environ(tmp_path):\ndiff --git a/conans/pylint_plugin.py b/conans/pylint_plugin.py\n--- a/conans/pylint_plugin.py\n+++ b/conans/pylint_plugin.py\n@@ -1,7 +1,6 @@\n \"\"\"Pylint plugin for ConanFile\"\"\"\n-\n import astroid\n-from astroid import MANAGER, scoped_nodes\n+from astroid import MANAGER\n \n \n def register(linter):\n@@ -16,7 +15,7 @@\n def transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n \n- str_class = scoped_nodes.builtin_lookup(\"str\")\n+ str_class = astroid.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n@@ -42,5 +41,5 @@\n \n \n MANAGER.register_transform(\n- scoped_nodes.Class, transform_conanfile,\n+ astroid.ClassDef, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "issue": "Enable python 3.7 in CI \nAt least for Linux\r\n\r\n\n", "before_files": [{"content": "\"\"\"Pylint plugin for ConanFile\"\"\"\n\nimport astroid\nfrom astroid import MANAGER, scoped_nodes\n\n\ndef register(linter):\n \"\"\"Declare package as plugin\n \n This function needs to be declared so astroid treats\n current file as a plugin.\n \"\"\"\n pass\n\n\ndef transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n\n str_class = scoped_nodes.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n \"conans.client.graph.graph_manager\").lookup(\"_RecipeBuildRequires\")\n file_copier_class = MANAGER.ast_from_module_name(\n \"conans.client.file_copier\").lookup(\"FileCopier\")\n file_importer_class = MANAGER.ast_from_module_name(\n \"conans.client.importer\").lookup(\"_FileImporter\")\n\n dynamic_fields = {\n \"source_folder\": str_class,\n \"build_folder\": str_class,\n \"package_folder\": str_class,\n \"build_requires\": build_requires_class,\n \"info_build\": info_class,\n \"info\": info_class,\n \"copy\": file_copier_class,\n \"copy_deps\": file_importer_class,\n }\n\n for f, t in dynamic_fields.items():\n node.locals[f] = [t]\n\n\nMANAGER.register_transform(\n scoped_nodes.Class, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "path": "conans/pylint_plugin.py"}, {"content": "import argparse\nimport os\nimport platform\nfrom contextlib import contextmanager\n\nwinpylocation = {\"py27\": \"C:\\\\Python27\\\\python.exe\",\n \"py34\": \"C:\\\\Python34\\\\python.exe\",\n \"py36\": \"C:\\\\Python36\\\\python.exe\"}\n\nmacpylocation = {\"py27\": \"/usr/bin/python\", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python\",\n \"py34\": \"/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python\",\n \"py36\": \"/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python\"}\n\nlinuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": \"/usr/bin/python3.4\",\n \"py36\": \"/usr/bin/python3.6\"}\n\n\ndef get_environ(tmp_path):\n if platform.system() == \"Windows\":\n return {\"CONAN_BASH_PATH\": 
\"c:/tools/msys64/usr/bin/bash\",\n \"CONAN_USER_HOME_SHORT\": os.path.join(tmp_path, \".conan\")}\n return {}\n\n\nclass Extender(argparse.Action):\n \"\"\"Allows to use the same flag several times in a command and creates a list with the values.\n For example:\n conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true\n It creates:\n options = ['qt:value', 'mode:2']\n settings = ['cucumber:true']\n \"\"\"\n def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable\n # Need None here incase `argparse.SUPPRESS` was supplied for `dest`\n dest = getattr(namespace, self.dest, None)\n if not hasattr(dest, 'extend') or dest == self.default:\n dest = []\n setattr(namespace, self.dest, dest)\n # if default isn't set to None, this method might be called\n # with the default as `values` for other arguments which\n # share this destination.\n parser.set_defaults(**{self.dest: None})\n\n try:\n dest.extend(values)\n except ValueError:\n dest.append(values)\n\n\n@contextmanager\ndef environment_append(env_vars):\n old_env = dict(os.environ)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n if name in old_env:\n env_vars[name] += os.pathsep + old_env[name]\n os.environ.update(env_vars)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n\n\n@contextmanager\ndef chdir(newdir):\n old_path = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(old_path)\n", "path": ".ci/jenkins/conf.py"}], "after_files": [{"content": "\"\"\"Pylint plugin for ConanFile\"\"\"\nimport astroid\nfrom astroid import MANAGER\n\n\ndef register(linter):\n \"\"\"Declare package as plugin\n \n This function needs to be declared so astroid treats\n current file as a plugin.\n \"\"\"\n pass\n\n\ndef transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n\n str_class = astroid.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n \"conans.client.graph.graph_manager\").lookup(\"_RecipeBuildRequires\")\n file_copier_class = MANAGER.ast_from_module_name(\n \"conans.client.file_copier\").lookup(\"FileCopier\")\n file_importer_class = MANAGER.ast_from_module_name(\n \"conans.client.importer\").lookup(\"_FileImporter\")\n\n dynamic_fields = {\n \"source_folder\": str_class,\n \"build_folder\": str_class,\n \"package_folder\": str_class,\n \"build_requires\": build_requires_class,\n \"info_build\": info_class,\n \"info\": info_class,\n \"copy\": file_copier_class,\n \"copy_deps\": file_importer_class,\n }\n\n for f, t in dynamic_fields.items():\n node.locals[f] = [t]\n\n\nMANAGER.register_transform(\n astroid.ClassDef, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "path": "conans/pylint_plugin.py"}, {"content": "import argparse\nimport os\nimport platform\nfrom contextlib import contextmanager\n\nwinpylocation = {\"py27\": \"C:\\\\Python27\\\\python.exe\",\n \"py34\": \"C:\\\\Python34\\\\python.exe\",\n \"py36\": \"C:\\\\Python36\\\\python.exe\"}\n\nmacpylocation = {\"py27\": \"/usr/bin/python\", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python\",\n \"py34\": \"/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python\",\n \"py36\": \"/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python\"}\n\nlinuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": 
\"/usr/bin/python3.4\",\n \"py36\": \"/usr/bin/python3.6\",\n \"py37\": \"/usr/bin/python3.7\"}\n\n\ndef get_environ(tmp_path):\n if platform.system() == \"Windows\":\n return {\"CONAN_BASH_PATH\": \"c:/tools/msys64/usr/bin/bash\",\n \"CONAN_USER_HOME_SHORT\": os.path.join(tmp_path, \".conan\")}\n return {}\n\n\nclass Extender(argparse.Action):\n \"\"\"Allows to use the same flag several times in a command and creates a list with the values.\n For example:\n conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true\n It creates:\n options = ['qt:value', 'mode:2']\n settings = ['cucumber:true']\n \"\"\"\n def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable\n # Need None here incase `argparse.SUPPRESS` was supplied for `dest`\n dest = getattr(namespace, self.dest, None)\n if not hasattr(dest, 'extend') or dest == self.default:\n dest = []\n setattr(namespace, self.dest, dest)\n # if default isn't set to None, this method might be called\n # with the default as `values` for other arguments which\n # share this destination.\n parser.set_defaults(**{self.dest: None})\n\n try:\n dest.extend(values)\n except ValueError:\n dest.append(values)\n\n\n@contextmanager\ndef environment_append(env_vars):\n old_env = dict(os.environ)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n if name in old_env:\n env_vars[name] += os.pathsep + old_env[name]\n os.environ.update(env_vars)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n\n\n@contextmanager\ndef chdir(newdir):\n old_path = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(old_path)\n", "path": ".ci/jenkins/conf.py"}]}
| 1,492 | 376 |
gh_patches_debug_1673
|
rasdani/github-patches
|
git_diff
|
instadeepai__Mava-626
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TEST] Jax Datasets
### What do you want to test?
Jax dataset components
### Outline of test structure
* Unit tests
* Test components and hooks
### Definition of done
Passing checks, cover all hooks, edge cases considered
### Mandatory checklist before making a PR
* [ ] The success criteria laid down in “Definition of done” are met.
* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.
* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mava/components/jax/building/datasets.py`
Content:
```
1 # python3
2 # Copyright 2021 InstaDeep Ltd. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Commonly used dataset components for system builders"""
17 import abc
18 from dataclasses import dataclass
19 from typing import Any, Callable, Optional
20
21 import reverb
22 from acme import datasets
23
24 from mava.components.jax import Component
25 from mava.core_jax import SystemBuilder
26
27 Transform = Callable[[reverb.ReplaySample], reverb.ReplaySample]
28
29
30 class TrainerDataset(Component):
31 @abc.abstractmethod
32 def __init__(
33 self,
34 config: Any,
35 ):
36 """_summary_
37
38 Args:
39 config : _description_.
40 """
41 self.config = config
42
43 @abc.abstractmethod
44 def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:
45 """_summary_
46
47 Args:
48 builder : _description_
49 """
50 pass
51
52 @staticmethod
53 def name() -> str:
54 """_summary_
55
56 Returns:
57 _description_
58 """
59 return "trainer_dataset"
60
61
62 @dataclass
63 class TransitionDatasetConfig:
64 sample_batch_size: int = 256
65 prefetch_size: Optional[int] = None
66 num_parallel_calls: int = 12
67 max_in_flight_samples_per_worker: Optional[int] = None
68 postprocess: Optional[Transform] = None
69 # dataset_name: str = "transition_dataset"
70
71
72 class TransitionDataset(TrainerDataset):
73 def __init__(
74 self,
75 config: TransitionDatasetConfig = TransitionDatasetConfig(),
76 ):
77 """_summary_
78
79 Args:
80 config : _description_.
81 """
82 self.config = config
83
84 def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:
85 """_summary_
86
87 Args:
88 builder : _description_
89 """
90 max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker
91 dataset = datasets.make_reverb_dataset(
92 table=builder.store.trainer_id,
93 server_address=builder.store.data_server_client.server_address,
94 batch_size=self.config.sample_batch_size,
95 prefetch_size=self.config.prefetch_size,
96 num_parallel_calls=self.config.num_parallel_calls,
97 max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,
98 postprocess=self.config.postprocess,
99 )
100
101 builder.store.dataset = iter(dataset)
102
103 @staticmethod
104 def config_class() -> Optional[Callable]:
105 """Config class used for component.
106
107 Returns:
108 config class/dataclass for component.
109 """
110 return TransitionDatasetConfig
111
112
113 @dataclass
114 class TrajectoryDatasetConfig:
115 sample_batch_size: int = 256
116 max_in_flight_samples_per_worker: int = 512
117 num_workers_per_iterator: int = -1
118 max_samples_per_stream: int = -1
119 rate_limiter_timeout_ms: int = -1
120 get_signature_timeout_secs: Optional[int] = None
121 # max_samples: int = -1
122 # dataset_name: str = "trajectory_dataset"
123
124
125 class TrajectoryDataset(TrainerDataset):
126 def __init__(
127 self,
128 config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),
129 ):
130 """_summary_
131
132 Args:
133 config : _description_.
134 """
135 self.config = config
136
137 def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:
138 """_summary_
139
140 Args:
141 builder : _description_
142 """
143 dataset = reverb.TrajectoryDataset.from_table_signature(
144 server_address=builder.store.data_server_client.server_address,
145 table=builder.store.trainer_id,
146 max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,
147 num_workers_per_iterator=self.config.num_workers_per_iterator,
148 max_samples_per_stream=self.config.max_samples_per_stream,
149 rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,
150 get_signature_timeout_secs=self.config.get_signature_timeout_secs,
151 # max_samples=self.config.max_samples,
152 )
153
154 # Add batch dimension.
155 dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)
156 builder.store.sample_batch_size = self.config.sample_batch_size
157
158 builder.store.dataset_iterator = dataset.as_numpy_iterator()
159
160 @staticmethod
161 def config_class() -> Optional[Callable]:
162 """Config class used for component.
163
164 Returns:
165 config class/dataclass for component.
166 """
167 return TrajectoryDatasetConfig
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mava/components/jax/building/datasets.py b/mava/components/jax/building/datasets.py
--- a/mava/components/jax/building/datasets.py
+++ b/mava/components/jax/building/datasets.py
@@ -98,7 +98,7 @@
postprocess=self.config.postprocess,
)
- builder.store.dataset = iter(dataset)
+ builder.store.dataset_iterator = iter(dataset)
@staticmethod
def config_class() -> Optional[Callable]:
|
{"golden_diff": "diff --git a/mava/components/jax/building/datasets.py b/mava/components/jax/building/datasets.py\n--- a/mava/components/jax/building/datasets.py\n+++ b/mava/components/jax/building/datasets.py\n@@ -98,7 +98,7 @@\n postprocess=self.config.postprocess,\n )\n \n- builder.store.dataset = iter(dataset)\n+ builder.store.dataset_iterator = iter(dataset)\n \n @staticmethod\n def config_class() -> Optional[Callable]:\n", "issue": "[TEST] Jax Datasets\n### What do you want to test?\r\nJax dataset components\r\n\r\n### Outline of test structure\r\n* Unit tests\r\n* Test components and hooks\r\n\r\n### Definition of done\r\nPassing checks, cover all hooks, edge cases considered\r\n\r\n### Mandatory checklist before making a PR\r\n* [ ] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.\r\n* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Commonly used dataset components for system builders\"\"\"\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional\n\nimport reverb\nfrom acme import datasets\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder\n\nTransform = Callable[[reverb.ReplaySample], reverb.ReplaySample]\n\n\nclass TrainerDataset(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"trainer_dataset\"\n\n\n@dataclass\nclass TransitionDatasetConfig:\n sample_batch_size: int = 256\n prefetch_size: Optional[int] = None\n num_parallel_calls: int = 12\n max_in_flight_samples_per_worker: Optional[int] = None\n postprocess: Optional[Transform] = None\n # dataset_name: str = \"transition_dataset\"\n\n\nclass TransitionDataset(TrainerDataset):\n def __init__(\n self,\n config: TransitionDatasetConfig = TransitionDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker\n dataset = datasets.make_reverb_dataset(\n table=builder.store.trainer_id,\n server_address=builder.store.data_server_client.server_address,\n batch_size=self.config.sample_batch_size,\n prefetch_size=self.config.prefetch_size,\n num_parallel_calls=self.config.num_parallel_calls,\n 
max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,\n postprocess=self.config.postprocess,\n )\n\n builder.store.dataset = iter(dataset)\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TransitionDatasetConfig\n\n\n@dataclass\nclass TrajectoryDatasetConfig:\n sample_batch_size: int = 256\n max_in_flight_samples_per_worker: int = 512\n num_workers_per_iterator: int = -1\n max_samples_per_stream: int = -1\n rate_limiter_timeout_ms: int = -1\n get_signature_timeout_secs: Optional[int] = None\n # max_samples: int = -1\n # dataset_name: str = \"trajectory_dataset\"\n\n\nclass TrajectoryDataset(TrainerDataset):\n def __init__(\n self,\n config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n dataset = reverb.TrajectoryDataset.from_table_signature(\n server_address=builder.store.data_server_client.server_address,\n table=builder.store.trainer_id,\n max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,\n num_workers_per_iterator=self.config.num_workers_per_iterator,\n max_samples_per_stream=self.config.max_samples_per_stream,\n rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,\n get_signature_timeout_secs=self.config.get_signature_timeout_secs,\n # max_samples=self.config.max_samples,\n )\n\n # Add batch dimension.\n dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)\n builder.store.sample_batch_size = self.config.sample_batch_size\n\n builder.store.dataset_iterator = dataset.as_numpy_iterator()\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TrajectoryDatasetConfig\n", "path": "mava/components/jax/building/datasets.py"}], "after_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Commonly used dataset components for system builders\"\"\"\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional\n\nimport reverb\nfrom acme import datasets\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder\n\nTransform = Callable[[reverb.ReplaySample], reverb.ReplaySample]\n\n\nclass TrainerDataset(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"trainer_dataset\"\n\n\n@dataclass\nclass TransitionDatasetConfig:\n sample_batch_size: int = 256\n prefetch_size: Optional[int] = None\n num_parallel_calls: int = 12\n max_in_flight_samples_per_worker: Optional[int] = None\n postprocess: Optional[Transform] = None\n # dataset_name: str = \"transition_dataset\"\n\n\nclass TransitionDataset(TrainerDataset):\n def __init__(\n self,\n config: TransitionDatasetConfig = TransitionDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker\n dataset = datasets.make_reverb_dataset(\n table=builder.store.trainer_id,\n server_address=builder.store.data_server_client.server_address,\n batch_size=self.config.sample_batch_size,\n prefetch_size=self.config.prefetch_size,\n num_parallel_calls=self.config.num_parallel_calls,\n max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,\n postprocess=self.config.postprocess,\n )\n\n builder.store.dataset_iterator = iter(dataset)\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TransitionDatasetConfig\n\n\n@dataclass\nclass TrajectoryDatasetConfig:\n sample_batch_size: int = 256\n max_in_flight_samples_per_worker: int = 512\n num_workers_per_iterator: int = -1\n max_samples_per_stream: int = -1\n rate_limiter_timeout_ms: int = -1\n get_signature_timeout_secs: Optional[int] = None\n # max_samples: int = -1\n # dataset_name: str = \"trajectory_dataset\"\n\n\nclass TrajectoryDataset(TrainerDataset):\n def __init__(\n self,\n config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n dataset = 
reverb.TrajectoryDataset.from_table_signature(\n server_address=builder.store.data_server_client.server_address,\n table=builder.store.trainer_id,\n max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,\n num_workers_per_iterator=self.config.num_workers_per_iterator,\n max_samples_per_stream=self.config.max_samples_per_stream,\n rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,\n get_signature_timeout_secs=self.config.get_signature_timeout_secs,\n # max_samples=self.config.max_samples,\n )\n\n # Add batch dimension.\n dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)\n builder.store.sample_batch_size = self.config.sample_batch_size\n\n builder.store.dataset_iterator = dataset.as_numpy_iterator()\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TrajectoryDatasetConfig\n", "path": "mava/components/jax/building/datasets.py"}]}
| 1,845 | 109 |
gh_patches_debug_31533
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-5943
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Edit user config file programatically
It would be great to be able to modify the user config file from Qiskit. Something like `user_config['default']['option'] = True`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/user_config.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """Utils for reading a user preference config files."""
14
15 import configparser
16 import os
17 from warnings import warn
18
19 from qiskit import exceptions
20
21 DEFAULT_FILENAME = os.path.join(os.path.expanduser("~"), ".qiskit", "settings.conf")
22
23
24 class UserConfig:
25 """Class representing a user config file
26
27 The config file format should look like:
28
29 [default]
30 circuit_drawer = mpl
31 circuit_mpl_style = default
32 circuit_mpl_style_path = ~/.qiskit:<default location>
33 transpile_optimization_level = 1
34 parallel = False
35 num_processes = 4
36
37 """
38
39 def __init__(self, filename=None):
40 """Create a UserConfig
41
42 Args:
43 filename (str): The path to the user config file. If one isn't
44 specified, ~/.qiskit/settings.conf is used.
45 """
46 if filename is None:
47 self.filename = DEFAULT_FILENAME
48 else:
49 self.filename = filename
50 self.settings = {}
51 self.config_parser = configparser.ConfigParser()
52
53 def read_config_file(self):
54 """Read config file and parse the contents into the settings attr."""
55 if not os.path.isfile(self.filename):
56 return
57 self.config_parser.read(self.filename)
58 if "default" in self.config_parser.sections():
59 # Parse circuit_drawer
60 circuit_drawer = self.config_parser.get("default", "circuit_drawer", fallback=None)
61 if circuit_drawer:
62 if circuit_drawer not in ["text", "mpl", "latex", "latex_source", "auto"]:
63 raise exceptions.QiskitUserConfigError(
64 "%s is not a valid circuit drawer backend. Must be "
65 "either 'text', 'mpl', 'latex', 'latex_source', or "
66 "'auto'." % circuit_drawer
67 )
68 self.settings["circuit_drawer"] = circuit_drawer
69
70 # Parse state_drawer
71 state_drawer = self.config_parser.get("default", "state_drawer", fallback=None)
72 if state_drawer:
73 valid_state_drawers = [
74 "repr",
75 "text",
76 "latex",
77 "latex_source",
78 "qsphere",
79 "hinton",
80 "bloch",
81 ]
82 if state_drawer not in valid_state_drawers:
83 valid_choices_string = "', '".join(c for c in valid_state_drawers)
84 raise exceptions.QiskitUserConfigError(
85 f"'{state_drawer}' is not a valid state drawer backend. "
86 f"Choose from: '{valid_choices_string}'"
87 )
88 self.settings["state_drawer"] = state_drawer
89
90 # Parse circuit_mpl_style
91 circuit_mpl_style = self.config_parser.get(
92 "default", "circuit_mpl_style", fallback=None
93 )
94 if circuit_mpl_style:
95 if not isinstance(circuit_mpl_style, str):
96 warn(
97 "%s is not a valid mpl circuit style. Must be "
98 "a text string. Will not load style." % circuit_mpl_style,
99 UserWarning,
100 2,
101 )
102 self.settings["circuit_mpl_style"] = circuit_mpl_style
103
104 # Parse circuit_mpl_style_path
105 circuit_mpl_style_path = self.config_parser.get(
106 "default", "circuit_mpl_style_path", fallback=None
107 )
108 if circuit_mpl_style_path:
109 cpath_list = circuit_mpl_style_path.split(":")
110 for path in cpath_list:
111 if not os.path.exists(os.path.expanduser(path)):
112 warn(
113 "%s is not a valid circuit mpl style path."
114 " Correct the path in ~/.qiskit/settings.conf." % path,
115 UserWarning,
116 2,
117 )
118 self.settings["circuit_mpl_style_path"] = cpath_list
119
120 # Parse transpile_optimization_level
121 transpile_optimization_level = self.config_parser.getint(
122 "default", "transpile_optimization_level", fallback=-1
123 )
124 if transpile_optimization_level != -1:
125 if transpile_optimization_level < 0 or transpile_optimization_level > 3:
126 raise exceptions.QiskitUserConfigError(
127 "%s is not a valid optimization level. Must be " "0, 1, 2, or 3."
128 )
129 self.settings["transpile_optimization_level"] = transpile_optimization_level
130
131 # Parse parallel
132 parallel_enabled = self.config_parser.getboolean("default", "parallel", fallback=None)
133 if parallel_enabled is not None:
134 self.settings["parallel_enabled"] = parallel_enabled
135
136 # Parse num_processes
137 num_processes = self.config_parser.getint("default", "num_processes", fallback=-1)
138 if num_processes != -1:
139 if num_processes <= 0:
140 raise exceptions.QiskitUserConfigError(
141 "%s is not a valid number of processes. Must be " "greater than 0"
142 )
143 self.settings["num_processes"] = num_processes
144
145
146 def get_config():
147 """Read the config file from the default location or env var
148
149 It will read a config file at either the default location
150 ~/.qiskit/settings.conf or if set the value of the QISKIT_SETTINGS env var.
151
152 It will return the parsed settings dict from the parsed config file.
153 Returns:
154 dict: The settings dict from the parsed config file.
155 """
156 filename = os.getenv("QISKIT_SETTINGS", DEFAULT_FILENAME)
157 if not os.path.isfile(filename):
158 return {}
159 user_config = UserConfig(filename)
160 user_config.read_config_file()
161 return user_config.settings
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/user_config.py b/qiskit/user_config.py
--- a/qiskit/user_config.py
+++ b/qiskit/user_config.py
@@ -143,6 +143,70 @@
self.settings["num_processes"] = num_processes
+def set_config(key, value, section=None, file_path=None):
+ """Adds or modifies a user configuration
+
+ It will add configuration to the currently configured location
+ or the value of file argument.
+
+ Only valid user config can be set in 'default' section. Custom
+ user config can be added in any other sections.
+
+ Changes to the existing config file will not be reflected in
+ the current session since the config file is parsed at import time.
+
+ Args:
+ key (str): name of the config
+ value (obj): value of the config
+ section (str, optional): if not specified, adds it to the
+ `default` section of the config file.
+ file_path (str, optional): the file to which config is added.
+ If not specified, adds it to the default config file or
+ if set, the value of `QISKIT_SETTINGS` env variable.
+
+ Raises:
+ QiskitUserConfigError: if the config is invalid
+ """
+ filename = file_path or os.getenv("QISKIT_SETTINGS", DEFAULT_FILENAME)
+ section = "default" if section is None else section
+
+ if not isinstance(key, str):
+ raise exceptions.QiskitUserConfigError("Key must be string type")
+
+ valid_config = {
+ "circuit_drawer",
+ "circuit_mpl_style",
+ "circuit_mpl_style_path",
+ "transpile_optimization_level",
+ "parallel",
+ "num_processes",
+ }
+
+ if section in [None, "default"]:
+ if key not in valid_config:
+ raise exceptions.QiskitUserConfigError("{} is not a valid user config.".format(key))
+
+ config = configparser.ConfigParser()
+ config.read(filename)
+
+ if section not in config.sections():
+ config.add_section(section)
+
+ config.set(section, key, str(value))
+
+ try:
+ with open(filename, "w") as cfgfile:
+ config.write(cfgfile)
+ except OSError as ex:
+ raise exceptions.QiskitUserConfigError(
+ "Unable to load the config file {}. Error: '{}'".format(filename, str(ex))
+ )
+
+ # validates config
+ user_config = UserConfig(filename)
+ user_config.read_config_file()
+
+
def get_config():
"""Read the config file from the default location or env var
|
{"golden_diff": "diff --git a/qiskit/user_config.py b/qiskit/user_config.py\n--- a/qiskit/user_config.py\n+++ b/qiskit/user_config.py\n@@ -143,6 +143,70 @@\n self.settings[\"num_processes\"] = num_processes\n \n \n+def set_config(key, value, section=None, file_path=None):\n+ \"\"\"Adds or modifies a user configuration\n+\n+ It will add configuration to the currently configured location\n+ or the value of file argument.\n+\n+ Only valid user config can be set in 'default' section. Custom\n+ user config can be added in any other sections.\n+\n+ Changes to the existing config file will not be reflected in\n+ the current session since the config file is parsed at import time.\n+\n+ Args:\n+ key (str): name of the config\n+ value (obj): value of the config\n+ section (str, optional): if not specified, adds it to the\n+ `default` section of the config file.\n+ file_path (str, optional): the file to which config is added.\n+ If not specified, adds it to the default config file or\n+ if set, the value of `QISKIT_SETTINGS` env variable.\n+\n+ Raises:\n+ QiskitUserConfigError: if the config is invalid\n+ \"\"\"\n+ filename = file_path or os.getenv(\"QISKIT_SETTINGS\", DEFAULT_FILENAME)\n+ section = \"default\" if section is None else section\n+\n+ if not isinstance(key, str):\n+ raise exceptions.QiskitUserConfigError(\"Key must be string type\")\n+\n+ valid_config = {\n+ \"circuit_drawer\",\n+ \"circuit_mpl_style\",\n+ \"circuit_mpl_style_path\",\n+ \"transpile_optimization_level\",\n+ \"parallel\",\n+ \"num_processes\",\n+ }\n+\n+ if section in [None, \"default\"]:\n+ if key not in valid_config:\n+ raise exceptions.QiskitUserConfigError(\"{} is not a valid user config.\".format(key))\n+\n+ config = configparser.ConfigParser()\n+ config.read(filename)\n+\n+ if section not in config.sections():\n+ config.add_section(section)\n+\n+ config.set(section, key, str(value))\n+\n+ try:\n+ with open(filename, \"w\") as cfgfile:\n+ config.write(cfgfile)\n+ except OSError as ex:\n+ raise exceptions.QiskitUserConfigError(\n+ \"Unable to load the config file {}. Error: '{}'\".format(filename, str(ex))\n+ )\n+\n+ # validates config\n+ user_config = UserConfig(filename)\n+ user_config.read_config_file()\n+\n+\n def get_config():\n \"\"\"Read the config file from the default location or env var\n", "issue": "Edit user config file programatically\nIt would be great to be able to modify the user config file from Qiskit. Something like `user_config['default']['option'] = True`.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utils for reading a user preference config files.\"\"\"\n\nimport configparser\nimport os\nfrom warnings import warn\n\nfrom qiskit import exceptions\n\nDEFAULT_FILENAME = os.path.join(os.path.expanduser(\"~\"), \".qiskit\", \"settings.conf\")\n\n\nclass UserConfig:\n \"\"\"Class representing a user config file\n\n The config file format should look like:\n\n [default]\n circuit_drawer = mpl\n circuit_mpl_style = default\n circuit_mpl_style_path = ~/.qiskit:<default location>\n transpile_optimization_level = 1\n parallel = False\n num_processes = 4\n\n \"\"\"\n\n def __init__(self, filename=None):\n \"\"\"Create a UserConfig\n\n Args:\n filename (str): The path to the user config file. If one isn't\n specified, ~/.qiskit/settings.conf is used.\n \"\"\"\n if filename is None:\n self.filename = DEFAULT_FILENAME\n else:\n self.filename = filename\n self.settings = {}\n self.config_parser = configparser.ConfigParser()\n\n def read_config_file(self):\n \"\"\"Read config file and parse the contents into the settings attr.\"\"\"\n if not os.path.isfile(self.filename):\n return\n self.config_parser.read(self.filename)\n if \"default\" in self.config_parser.sections():\n # Parse circuit_drawer\n circuit_drawer = self.config_parser.get(\"default\", \"circuit_drawer\", fallback=None)\n if circuit_drawer:\n if circuit_drawer not in [\"text\", \"mpl\", \"latex\", \"latex_source\", \"auto\"]:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid circuit drawer backend. Must be \"\n \"either 'text', 'mpl', 'latex', 'latex_source', or \"\n \"'auto'.\" % circuit_drawer\n )\n self.settings[\"circuit_drawer\"] = circuit_drawer\n\n # Parse state_drawer\n state_drawer = self.config_parser.get(\"default\", \"state_drawer\", fallback=None)\n if state_drawer:\n valid_state_drawers = [\n \"repr\",\n \"text\",\n \"latex\",\n \"latex_source\",\n \"qsphere\",\n \"hinton\",\n \"bloch\",\n ]\n if state_drawer not in valid_state_drawers:\n valid_choices_string = \"', '\".join(c for c in valid_state_drawers)\n raise exceptions.QiskitUserConfigError(\n f\"'{state_drawer}' is not a valid state drawer backend. \"\n f\"Choose from: '{valid_choices_string}'\"\n )\n self.settings[\"state_drawer\"] = state_drawer\n\n # Parse circuit_mpl_style\n circuit_mpl_style = self.config_parser.get(\n \"default\", \"circuit_mpl_style\", fallback=None\n )\n if circuit_mpl_style:\n if not isinstance(circuit_mpl_style, str):\n warn(\n \"%s is not a valid mpl circuit style. Must be \"\n \"a text string. 
Will not load style.\" % circuit_mpl_style,\n UserWarning,\n 2,\n )\n self.settings[\"circuit_mpl_style\"] = circuit_mpl_style\n\n # Parse circuit_mpl_style_path\n circuit_mpl_style_path = self.config_parser.get(\n \"default\", \"circuit_mpl_style_path\", fallback=None\n )\n if circuit_mpl_style_path:\n cpath_list = circuit_mpl_style_path.split(\":\")\n for path in cpath_list:\n if not os.path.exists(os.path.expanduser(path)):\n warn(\n \"%s is not a valid circuit mpl style path.\"\n \" Correct the path in ~/.qiskit/settings.conf.\" % path,\n UserWarning,\n 2,\n )\n self.settings[\"circuit_mpl_style_path\"] = cpath_list\n\n # Parse transpile_optimization_level\n transpile_optimization_level = self.config_parser.getint(\n \"default\", \"transpile_optimization_level\", fallback=-1\n )\n if transpile_optimization_level != -1:\n if transpile_optimization_level < 0 or transpile_optimization_level > 3:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid optimization level. Must be \" \"0, 1, 2, or 3.\"\n )\n self.settings[\"transpile_optimization_level\"] = transpile_optimization_level\n\n # Parse parallel\n parallel_enabled = self.config_parser.getboolean(\"default\", \"parallel\", fallback=None)\n if parallel_enabled is not None:\n self.settings[\"parallel_enabled\"] = parallel_enabled\n\n # Parse num_processes\n num_processes = self.config_parser.getint(\"default\", \"num_processes\", fallback=-1)\n if num_processes != -1:\n if num_processes <= 0:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid number of processes. Must be \" \"greater than 0\"\n )\n self.settings[\"num_processes\"] = num_processes\n\n\ndef get_config():\n \"\"\"Read the config file from the default location or env var\n\n It will read a config file at either the default location\n ~/.qiskit/settings.conf or if set the value of the QISKIT_SETTINGS env var.\n\n It will return the parsed settings dict from the parsed config file.\n Returns:\n dict: The settings dict from the parsed config file.\n \"\"\"\n filename = os.getenv(\"QISKIT_SETTINGS\", DEFAULT_FILENAME)\n if not os.path.isfile(filename):\n return {}\n user_config = UserConfig(filename)\n user_config.read_config_file()\n return user_config.settings\n", "path": "qiskit/user_config.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utils for reading a user preference config files.\"\"\"\n\nimport configparser\nimport os\nfrom warnings import warn\n\nfrom qiskit import exceptions\n\nDEFAULT_FILENAME = os.path.join(os.path.expanduser(\"~\"), \".qiskit\", \"settings.conf\")\n\n\nclass UserConfig:\n \"\"\"Class representing a user config file\n\n The config file format should look like:\n\n [default]\n circuit_drawer = mpl\n circuit_mpl_style = default\n circuit_mpl_style_path = ~/.qiskit:<default location>\n transpile_optimization_level = 1\n parallel = False\n num_processes = 4\n\n \"\"\"\n\n def __init__(self, filename=None):\n \"\"\"Create a UserConfig\n\n Args:\n filename (str): The path to the user config file. 
If one isn't\n specified, ~/.qiskit/settings.conf is used.\n \"\"\"\n if filename is None:\n self.filename = DEFAULT_FILENAME\n else:\n self.filename = filename\n self.settings = {}\n self.config_parser = configparser.ConfigParser()\n\n def read_config_file(self):\n \"\"\"Read config file and parse the contents into the settings attr.\"\"\"\n if not os.path.isfile(self.filename):\n return\n self.config_parser.read(self.filename)\n if \"default\" in self.config_parser.sections():\n # Parse circuit_drawer\n circuit_drawer = self.config_parser.get(\"default\", \"circuit_drawer\", fallback=None)\n if circuit_drawer:\n if circuit_drawer not in [\"text\", \"mpl\", \"latex\", \"latex_source\", \"auto\"]:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid circuit drawer backend. Must be \"\n \"either 'text', 'mpl', 'latex', 'latex_source', or \"\n \"'auto'.\" % circuit_drawer\n )\n self.settings[\"circuit_drawer\"] = circuit_drawer\n\n # Parse state_drawer\n state_drawer = self.config_parser.get(\"default\", \"state_drawer\", fallback=None)\n if state_drawer:\n valid_state_drawers = [\n \"repr\",\n \"text\",\n \"latex\",\n \"latex_source\",\n \"qsphere\",\n \"hinton\",\n \"bloch\",\n ]\n if state_drawer not in valid_state_drawers:\n valid_choices_string = \"', '\".join(c for c in valid_state_drawers)\n raise exceptions.QiskitUserConfigError(\n f\"'{state_drawer}' is not a valid state drawer backend. \"\n f\"Choose from: '{valid_choices_string}'\"\n )\n self.settings[\"state_drawer\"] = state_drawer\n\n # Parse circuit_mpl_style\n circuit_mpl_style = self.config_parser.get(\n \"default\", \"circuit_mpl_style\", fallback=None\n )\n if circuit_mpl_style:\n if not isinstance(circuit_mpl_style, str):\n warn(\n \"%s is not a valid mpl circuit style. Must be \"\n \"a text string. Will not load style.\" % circuit_mpl_style,\n UserWarning,\n 2,\n )\n self.settings[\"circuit_mpl_style\"] = circuit_mpl_style\n\n # Parse circuit_mpl_style_path\n circuit_mpl_style_path = self.config_parser.get(\n \"default\", \"circuit_mpl_style_path\", fallback=None\n )\n if circuit_mpl_style_path:\n cpath_list = circuit_mpl_style_path.split(\":\")\n for path in cpath_list:\n if not os.path.exists(os.path.expanduser(path)):\n warn(\n \"%s is not a valid circuit mpl style path.\"\n \" Correct the path in ~/.qiskit/settings.conf.\" % path,\n UserWarning,\n 2,\n )\n self.settings[\"circuit_mpl_style_path\"] = cpath_list\n\n # Parse transpile_optimization_level\n transpile_optimization_level = self.config_parser.getint(\n \"default\", \"transpile_optimization_level\", fallback=-1\n )\n if transpile_optimization_level != -1:\n if transpile_optimization_level < 0 or transpile_optimization_level > 3:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid optimization level. Must be \" \"0, 1, 2, or 3.\"\n )\n self.settings[\"transpile_optimization_level\"] = transpile_optimization_level\n\n # Parse parallel\n parallel_enabled = self.config_parser.getboolean(\"default\", \"parallel\", fallback=None)\n if parallel_enabled is not None:\n self.settings[\"parallel_enabled\"] = parallel_enabled\n\n # Parse num_processes\n num_processes = self.config_parser.getint(\"default\", \"num_processes\", fallback=-1)\n if num_processes != -1:\n if num_processes <= 0:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid number of processes. 
Must be \" \"greater than 0\"\n )\n self.settings[\"num_processes\"] = num_processes\n\n\ndef set_config(key, value, section=None, file_path=None):\n \"\"\"Adds or modifies a user configuration\n\n It will add configuration to the currently configured location\n or the value of file argument.\n\n Only valid user config can be set in 'default' section. Custom\n user config can be added in any other sections.\n\n Changes to the existing config file will not be reflected in\n the current session since the config file is parsed at import time.\n\n Args:\n key (str): name of the config\n value (obj): value of the config\n section (str, optional): if not specified, adds it to the\n `default` section of the config file.\n file_path (str, optional): the file to which config is added.\n If not specified, adds it to the default config file or\n if set, the value of `QISKIT_SETTINGS` env variable.\n\n Raises:\n QiskitUserConfigError: if the config is invalid\n \"\"\"\n filename = file_path or os.getenv(\"QISKIT_SETTINGS\", DEFAULT_FILENAME)\n section = \"default\" if section is None else section\n\n if not isinstance(key, str):\n raise exceptions.QiskitUserConfigError(\"Key must be string type\")\n\n valid_config = {\n \"circuit_drawer\",\n \"circuit_mpl_style\",\n \"circuit_mpl_style_path\",\n \"transpile_optimization_level\",\n \"parallel\",\n \"num_processes\",\n }\n\n if section in [None, \"default\"]:\n if key not in valid_config:\n raise exceptions.QiskitUserConfigError(\"{} is not a valid user config.\".format(key))\n\n config = configparser.ConfigParser()\n config.read(filename)\n\n if section not in config.sections():\n config.add_section(section)\n\n config.set(section, key, str(value))\n\n try:\n with open(filename, \"w\") as cfgfile:\n config.write(cfgfile)\n except OSError as ex:\n raise exceptions.QiskitUserConfigError(\n \"Unable to load the config file {}. Error: '{}'\".format(filename, str(ex))\n )\n\n # validates config\n user_config = UserConfig(filename)\n user_config.read_config_file()\n\n\ndef get_config():\n \"\"\"Read the config file from the default location or env var\n\n It will read a config file at either the default location\n ~/.qiskit/settings.conf or if set the value of the QISKIT_SETTINGS env var.\n\n It will return the parsed settings dict from the parsed config file.\n Returns:\n dict: The settings dict from the parsed config file.\n \"\"\"\n filename = os.getenv(\"QISKIT_SETTINGS\", DEFAULT_FILENAME)\n if not os.path.isfile(filename):\n return {}\n user_config = UserConfig(filename)\n user_config.read_config_file()\n return user_config.settings\n", "path": "qiskit/user_config.py"}]}
| 1,986 | 608 |
gh_patches_debug_24500
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-23
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create `resolve_to` utility
Python variables are usually named using `snake_case`, whereas the majority of JS written today uses `camelCase`. Ariadne will need to provide a way to resolve one to the other.
The idea is to create a `resolve_to` function taking a single `name` argument and returning a resolver for the specified attr/key name, enabling **explicit** mapping such as:
```
resolvers_map = {
"User": {
"lastVisitedOn": resolve_to("last_visited_on")
}
}
```
Another advantage of having such a utility would be support for arbitrary field names:
```
resolvers_map = {
"User": {
"lastVisitedOn": resolve_to("last_visit")
}
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/resolvers.py`
Content:
```
1 from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema
2 from graphql.execution.base import ResolveInfo
3
4
5 def default_resolver(parent, info: ResolveInfo):
6 if isinstance(parent, dict):
7 return parent.get(info.field_name)
8 return getattr(parent, info.field_name, None)
9
10
11 def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):
12 for type_name, type_object in schema.get_type_map().items():
13 if isinstance(type_object, GraphQLObjectType):
14 add_resolve_functions_to_object(type_name, type_object, resolvers)
15 if isinstance(type_object, GraphQLScalarType):
16 add_resolve_function_to_scalar(type_name, type_object, resolvers)
17
18
19 def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):
20 type_resolver = resolvers.get(name, {})
21 for field_name, field_object in obj.fields.items():
22 field_resolver = type_resolver.get(field_name, default_resolver)
23 field_object.resolver = field_resolver
24
25
26 def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):
27 serializer = resolvers.get(name, obj.serialize)
28 obj.serialize = serializer
29
```
Path: `ariadne/__init__.py`
Content:
```
1 from .build_schema import build_schema_from_type_definitions
2 from .executable_schema import make_executable_schema
3 from .resolvers import add_resolve_functions_to_schema, default_resolver
4
5 __all__ = [
6 "add_resolve_functions_to_schema",
7 "build_schema_from_type_definitions",
8 "default_resolver",
9 "make_executable_schema",
10 ]
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ariadne/__init__.py b/ariadne/__init__.py
--- a/ariadne/__init__.py
+++ b/ariadne/__init__.py
@@ -1,10 +1,11 @@
from .build_schema import build_schema_from_type_definitions
from .executable_schema import make_executable_schema
-from .resolvers import add_resolve_functions_to_schema, default_resolver
+from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to
__all__ = [
"add_resolve_functions_to_schema",
"build_schema_from_type_definitions",
"default_resolver",
"make_executable_schema",
+ "resolve_to",
]
diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py
--- a/ariadne/resolvers.py
+++ b/ariadne/resolvers.py
@@ -2,10 +2,21 @@
from graphql.execution.base import ResolveInfo
-def default_resolver(parent, info: ResolveInfo):
+def resolve_parent_field(parent, name: str):
if isinstance(parent, dict):
- return parent.get(info.field_name)
- return getattr(parent, info.field_name, None)
+ return parent.get(name)
+ return getattr(parent, name, None)
+
+
+def default_resolver(parent, info: ResolveInfo):
+ return resolve_parent_field(parent, info.field_name)
+
+
+def resolve_to(name: str):
+ def resolver(parent, *_):
+ return resolve_parent_field(parent, name)
+
+ return resolver
def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):
|
{"golden_diff": "diff --git a/ariadne/__init__.py b/ariadne/__init__.py\n--- a/ariadne/__init__.py\n+++ b/ariadne/__init__.py\n@@ -1,10 +1,11 @@\n from .build_schema import build_schema_from_type_definitions\n from .executable_schema import make_executable_schema\n-from .resolvers import add_resolve_functions_to_schema, default_resolver\n+from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\n \n __all__ = [\n \"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n+ \"resolve_to\",\n ]\ndiff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -2,10 +2,21 @@\n from graphql.execution.base import ResolveInfo\n \n \n-def default_resolver(parent, info: ResolveInfo):\n+def resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n- return parent.get(info.field_name)\n- return getattr(parent, info.field_name, None)\n+ return parent.get(name)\n+ return getattr(parent, name, None)\n+\n+\n+def default_resolver(parent, info: ResolveInfo):\n+ return resolve_parent_field(parent, info.field_name)\n+\n+\n+def resolve_to(name: str):\n+ def resolver(parent, *_):\n+ return resolve_parent_field(parent, name)\n+\n+ return resolver\n \n \n def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n", "issue": "Create `resolve_to` utility\nPython variables are usually named using the `snake_case`, whereas majority of JS written today uses the `camelCase`. Ariadne will need to provide a way to resolve one to another.\r\n\r\nThe idea is to create `resolver_for` function taking single `name` argument and returning resolver for specified attr/key name, enabling **explicit** mapping as such:\r\n\r\n```\r\nresolvers_map = {\r\n \"User\": {\r\n \"lastVisitedOn\": resolve_to(\"last_visited_on\")\r\n }\r\n}\r\n```\r\n\r\nOther advantage of having such utility would be arbitrary field names:\r\n\r\n```\r\nresolvers_map = {\r\n \"User\": {\r\n \"lastVisitedOn\": resolve_to(\"last_visit\")\r\n }\r\n}\r\n```\n", "before_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef default_resolver(parent, info: ResolveInfo):\n if isinstance(parent, dict):\n return parent.get(info.field_name)\n return getattr(parent, info.field_name, None)\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_function_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolver = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolver.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n serializer = resolvers.get(name, obj.serialize)\n obj.serialize = serializer\n", "path": "ariadne/resolvers.py"}, {"content": "from .build_schema import build_schema_from_type_definitions\nfrom .executable_schema import make_executable_schema\nfrom .resolvers import add_resolve_functions_to_schema, default_resolver\n\n__all__ = [\n 
\"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n]\n", "path": "ariadne/__init__.py"}], "after_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n return parent.get(name)\n return getattr(parent, name, None)\n\n\ndef default_resolver(parent, info: ResolveInfo):\n return resolve_parent_field(parent, info.field_name)\n\n\ndef resolve_to(name: str):\n def resolver(parent, *_):\n return resolve_parent_field(parent, name)\n\n return resolver\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_function_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolver = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolver.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n serializer = resolvers.get(name, obj.serialize)\n obj.serialize = serializer\n", "path": "ariadne/resolvers.py"}, {"content": "from .build_schema import build_schema_from_type_definitions\nfrom .executable_schema import make_executable_schema\nfrom .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\n\n__all__ = [\n \"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n \"resolve_to\",\n]\n", "path": "ariadne/__init__.py"}]}
| 805 | 352 |
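A short sketch of how the `resolve_to` helper from the diff above could be wired into a schema; the SDL and field names are illustrative, and it assumes `build_schema_from_type_definitions` accepts an SDL string, as its name suggests.

```python
from ariadne import (
    add_resolve_functions_to_schema,
    build_schema_from_type_definitions,
    resolve_to,
)

type_defs = """
type Query {
    user: User
}
type User {
    lastVisitedOn: String
}
"""

schema = build_schema_from_type_definitions(type_defs)

resolvers = {
    "User": {
        # Map the camelCase GraphQL field onto a snake_case attribute/key.
        "lastVisitedOn": resolve_to("last_visited_on"),
    }
}

# Attach the resolver (and the default resolver for all other fields).
add_resolve_functions_to_schema(schema, resolvers)
```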
gh_patches_debug_5210
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-3464
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Greenwave consumer should not process the compose results
Currently the Greenwave consumer consumes all the messages from `topic=org.fedoraproject.prod.greenwave.decision.update`, but some messages on this topic are related to compose results rather than Koji builds.
The consumer should filter the messages based on the `subject_type` and only process the `koji_build` type.
See a koji_build example https://apps.fedoraproject.org/datagrepper/id?id=2019-efb310ed-6acf-4742-809f-90a40c2a5855&is_raw=true&size=extra-large
And a compose example https://apps.fedoraproject.org/datagrepper/id?id=2019-33002e05-241c-4180-8953-5a29cea4443d&is_raw=true&size=extra-large
This is currently raising the following exception in production:
```
2019-07-26 15:16:55,501 ERROR [bodhi.server][PoolThread-twisted.internet.reactor-0] Bodhi failed to send POST request to Greenwave at the following URL "https://greenwave-web-greenwave.app.os.fedoraproject.org/api/v1.0/decision". The status code was "502". The error was "{'message': 'Failed to retrieve SCM URL from Koji build "Fedora-Workstation-Rawhide-20190726.n.0" at "https://koji.fedoraproject.org/kojihub" (expected SCM URL in "source" attribute)'}".
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/consumers/greenwave.py`
Content:
```
1 # Copyright © 2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "greenwave handler".
20
21 This module is responsible for listening for messages from greenwave.
22 It then updates the policies of the build that greenwave checked.
23 """
24
25 import logging
26
27 import fedora_messaging
28
29 from bodhi.server.models import Build
30 from bodhi.server.util import transactional_session_maker
31
32 log = logging.getLogger(__name__)
33
34
35 class GreenwaveHandler:
36 """
37 The Bodhi Greenwave Handler.
38
39 A fedora-messaging listener waiting for messages from greenwave about enforced policies.
40 """
41
42 def __init__(self):
43 """Initialize the GreenwaveHandler."""
44 self.db_factory = transactional_session_maker()
45
46 def __call__(self, message: fedora_messaging.api.Message):
47 """Handle messages arriving with the configured topic."""
48 msg = message.body
49 if not msg:
50 log.debug("Ignoring message without body.")
51 return
52
53 subject_identifier = msg.get("subject_identifier")
54
55 if subject_identifier is None:
56 log.debug("Couldn't find subject_identifier in Greenwave message")
57 return
58
59 with self.db_factory():
60
61 build = Build.get(subject_identifier)
62 if build is None:
63 log.debug(f"Couldn't find build {subject_identifier} in DB")
64 return
65
66 log.info(f"Updating the test_gating_status for: {build.update.alias}")
67 build.update.update_test_gating_status()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bodhi/server/consumers/greenwave.py b/bodhi/server/consumers/greenwave.py
--- a/bodhi/server/consumers/greenwave.py
+++ b/bodhi/server/consumers/greenwave.py
@@ -56,6 +56,11 @@
log.debug("Couldn't find subject_identifier in Greenwave message")
return
+ subject_type = msg.get("subject_type")
+ if subject_type == "compose":
+ log.debug("Not requesting a decision for a compose")
+ return
+
with self.db_factory():
build = Build.get(subject_identifier)
|
{"golden_diff": "diff --git a/bodhi/server/consumers/greenwave.py b/bodhi/server/consumers/greenwave.py\n--- a/bodhi/server/consumers/greenwave.py\n+++ b/bodhi/server/consumers/greenwave.py\n@@ -56,6 +56,11 @@\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n \n+ subject_type = msg.get(\"subject_type\")\n+ if subject_type == \"compose\":\n+ log.debug(\"Not requesting a decision for a compose\")\n+ return\n+\n with self.db_factory():\n \n build = Build.get(subject_identifier)\n", "issue": "Greenwave consumer should not process the compose results\nCurrently the greenwave consumer consumes all the messages from `topic=org.fedoraproject.prod.greenwave.decision.update` but some messages on this topic are related to compose results and not koji build.\r\n\r\nThe consumer should filter the messages based on the `subject_type`, and only process `koji_build` type.\r\n\r\nSee a koji_build example https://apps.fedoraproject.org/datagrepper/id?id=2019-efb310ed-6acf-4742-809f-90a40c2a5855&is_raw=true&size=extra-large\r\n\r\nAnd a compose example https://apps.fedoraproject.org/datagrepper/id?id=2019-33002e05-241c-4180-8953-5a29cea4443d&is_raw=true&size=extra-large\r\n\r\nThis is currently raising the following exception in production : \r\n``` 2019-07-26 15:16:55,501 ERROR [bodhi.server][PoolThread-twisted.internet.reactor-0] Bodhi failed to send POST request to Greenwave at the following URL \"https://greenwave-web-greenwave.app.os.fedoraproject.org/api/v1.0/decision\". The status code was \"502\". The error was \"{'message': 'Failed to retrieve SCM URL from Koji build \"Fedora-Workstation-Rawhide-20190726.n.0\" at \"https://koji.fedoraproject.org/kojihub\" (expected SCM URL in \"source\" attribute)'}\". ```\n", "before_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"greenwave handler\".\n\nThis module is responsible for listening for messages from greenwave.\nIt then updates the policies of the build that greenwave checked.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server.models import Build\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger(__name__)\n\n\nclass GreenwaveHandler:\n \"\"\"\n The Bodhi Greenwave Handler.\n\n A fedora-messaging listener waiting for messages from greenwave about enforced policies.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the GreenwaveHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"Handle messages arriving with the configured topic.\"\"\"\n msg = message.body\n if not msg:\n log.debug(\"Ignoring message without body.\")\n return\n\n subject_identifier = msg.get(\"subject_identifier\")\n\n if subject_identifier is None:\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n\n with self.db_factory():\n\n build = Build.get(subject_identifier)\n if build is None:\n log.debug(f\"Couldn't find build {subject_identifier} in DB\")\n return\n\n log.info(f\"Updating the test_gating_status for: {build.update.alias}\")\n build.update.update_test_gating_status()\n", "path": "bodhi/server/consumers/greenwave.py"}], "after_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"greenwave handler\".\n\nThis module is responsible for listening for messages from greenwave.\nIt then updates the policies of the build that greenwave checked.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server.models import Build\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger(__name__)\n\n\nclass GreenwaveHandler:\n \"\"\"\n The Bodhi Greenwave Handler.\n\n A fedora-messaging listener waiting for messages from greenwave about enforced policies.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the GreenwaveHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"Handle messages arriving with the configured topic.\"\"\"\n msg = message.body\n if not msg:\n log.debug(\"Ignoring message without body.\")\n return\n\n subject_identifier = msg.get(\"subject_identifier\")\n\n if subject_identifier is None:\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n\n subject_type = msg.get(\"subject_type\")\n if subject_type == \"compose\":\n log.debug(\"Not requesting a decision for a compose\")\n return\n\n with self.db_factory():\n\n build = Build.get(subject_identifier)\n if build is None:\n log.debug(f\"Couldn't find build {subject_identifier} in DB\")\n return\n\n log.info(f\"Updating the test_gating_status for: {build.update.alias}\")\n build.update.update_test_gating_status()\n", "path": "bodhi/server/consumers/greenwave.py"}]}
| 1,253 | 139 |
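A rough sketch of exercising the compose filter added in the diff above. The message bodies mirror the datagrepper examples linked in the issue, the build NVR is invented for illustration, and it assumes `fedora_messaging.api.Message` accepts `topic`/`body` keyword arguments; the `koji_build` case additionally needs a configured Bodhi database.

```python
from fedora_messaging import api

from bodhi.server.consumers.greenwave import GreenwaveHandler

handler = GreenwaveHandler()

# Compose decision: with the patch, the handler logs and returns before
# touching the database.
compose_msg = api.Message(
    topic="org.fedoraproject.prod.greenwave.decision.update",
    body={
        "subject_type": "compose",
        "subject_identifier": "Fedora-Workstation-Rawhide-20190726.n.0",
    },
)
handler(compose_msg)

# koji_build decision: the handler looks up the build and refreshes the
# update's test gating status (requires a configured database).
build_msg = api.Message(
    topic="org.fedoraproject.prod.greenwave.decision.update",
    body={
        "subject_type": "koji_build",
        "subject_identifier": "some-package-1.0-1.fc31",
    },
)
handler(build_msg)
```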
gh_patches_debug_24389
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-4189
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/tasks/reclaim_space.py`
Content:
```
1 from pulpcore.app.models import (
2 Artifact,
3 Content,
4 ContentArtifact,
5 ProgressReport,
6 PublishedMetadata,
7 Repository,
8 RepositoryVersion,
9 )
10 from pulpcore.app.util import get_domain
11
12
13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
14 """
15 This task frees-up disk space by removing Artifact files from the filesystem for Content
16 exclusive to the list of provided repos.
17
18 Note: content marked as `proctected` will be excluded from the reclaim disk space.
19
20 Kwargs:
21 repo_pks (list): A list of repo pks the disk reclaim space is performed on.
22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim
23 disk space.
24 force (bool): If True, uploaded content will be taken into account.
25
26 """
27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)
28 for repo in reclaimed_repos:
29 repo.invalidate_cache(everything=True)
30
31 domain = get_domain()
32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)
33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)
34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)
35 c_reclaim_qs = c_reclaim_qs.exclude(
36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()
37 )
38
39 if keeplist_rv_pks:
40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)
41 rv_content = Content.objects.none()
42 for rv in rv_qs.iterator():
43 rv_content |= rv.content
44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)
45
46 content_distinct = c_reclaim_qs.distinct("pulp_type")
47 unprotected = []
48 for content in content_distinct:
49 if not content.cast().PROTECTED_FROM_RECLAIM:
50 unprotected.append(content.pulp_type)
51
52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter(
53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
54 )
55 if not force:
56 ca_qs = ca_qs.filter(remoteartifact__isnull=False)
57 artifact_pks = set()
58 ca_to_update = []
59 for ca in ca_qs.iterator():
60 if ca.content.pulp_type in unprotected:
61 artifact_pks.add(ca.artifact.pk)
62 ca.artifact = None
63 ca_to_update.append(ca)
64
65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000)
66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)
67 progress_bar = ProgressReport(
68 message="Reclaim disk space",
69 total=artifacts_to_delete.count(),
70 code="reclaim-space.artifact",
71 done=0,
72 state="running",
73 )
74 progress_bar.save()
75
76 counter = 0
77 interval = 100
78 for artifact in artifacts_to_delete.iterator():
79 # we need to manually call delete() because it cleans up the file on the filesystem
80 artifact.delete()
81 progress_bar.done += 1
82 counter += 1
83
84 if counter >= interval:
85 progress_bar.save()
86 counter = 0
87
88 progress_bar.state = "completed"
89 progress_bar.save()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
|
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}], "after_files": [{"content": "from logging import getLogger\n\nfrom django.db.models.deletion import ProtectedError\n\nfrom pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\nlog = getLogger(__name__)\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n 
ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n try:\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n except ProtectedError as e:\n # Rarely artifact could be shared between to different content units.\n # Just log and skip the artifact deletion in this case\n log.info(e)\n else:\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
| 1,349 | 322 |
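The heart of the fix above is the delete-and-skip pattern around `Artifact.delete()`. In isolation it looks roughly like the sketch below; the helper name is invented, progress reporting is omitted, and the queryset argument stands in for `artifacts_to_delete` from the task.

```python
from logging import getLogger

from django.db.models.deletion import ProtectedError

log = getLogger(__name__)


def delete_artifacts(artifacts):
    """Delete artifact rows (and their files), skipping any still protected."""
    deleted = 0
    for artifact in artifacts.iterator():
        try:
            # delete() is called explicitly because it also removes the
            # file from the filesystem.
            artifact.delete()
        except ProtectedError as exc:
            # An artifact can be shared by several content units; if another
            # ContentArtifact still references it, just log and move on.
            log.info(exc)
        else:
            deleted += 1
    return deleted
```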
gh_patches_debug_49360
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-5407
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pull GCI docs into main Zulip repo.
Some of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link to them from appropriate places.
cc @synicalsyntax @lonerz @tommyip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import logging
5 import re
6 import scrapy
7
8 from scrapy import Request
9 from scrapy.linkextractors import IGNORED_EXTENSIONS
10 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
11 from scrapy.utils.url import url_has_any_extension
12
13 from typing import Any, Generator, List, Optional, Tuple
14
15 EXCLUDED_URLS = [
16 # Google calendar returns 404s on HEAD requests unconditionally
17 'https://calendar.google.com/calendar/[email protected]',
18 # Returns 409 errors to HEAD requests frequently
19 'https://medium.freecodecamp.com',
20 ]
21
22
23 class BaseDocumentationSpider(scrapy.Spider):
24 name = None # type: Optional[str]
25 # Exclude domain address.
26 deny_domains = [] # type: List[str]
27 start_urls = [] # type: List[str]
28 deny = [] # type: List[str]
29 file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]
30 tags = ('a', 'area', 'img')
31 attrs = ('href', 'src')
32
33 def __init__(self, *args, **kwargs):
34 # type: (*Any, **Any) -> None
35 super(BaseDocumentationSpider, self).__init__(*args, **kwargs)
36 self.has_error = False
37
38 def _set_error_state(self):
39 # type: () -> None
40 self.has_error = True
41
42 def _has_extension(self, url):
43 # type: (str) -> bool
44 return url_has_any_extension(url, self.file_extensions)
45
46 def _is_external_url(self, url):
47 # type: (str) -> bool
48 return url.startswith('http') or self._has_extension(url)
49
50 def check_existing(self, response):
51 # type: (Any) -> None
52 self.log(response)
53
54 def check_permalink(self, response):
55 # type: (Any) -> None
56 self.log(response)
57 xpath_template = "//*[@id='{permalink}' or @name='{permalink}']"
58 m = re.match(r".+\#(?P<permalink>.*)$", response.request.url) # Get anchor value.
59 if not m:
60 return
61 permalink = m.group('permalink')
62 # Check permalink existing on response page.
63 if not response.selector.xpath(xpath_template.format(permalink=permalink)):
64 self._set_error_state()
65 raise Exception(
66 "Permalink #{} is not found on page {}".format(permalink, response.request.url))
67
68 def parse(self, response):
69 # type: (Any) -> Generator[Request, None, None]
70 self.log(response)
71 for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
72 tags=self.tags, attrs=self.attrs, deny=self.deny,
73 canonicalize=False).extract_links(response):
74 callback = self.parse # type: Any
75 dont_filter = False
76 method = 'GET'
77 if self._is_external_url(link.url):
78 callback = self.check_existing
79 method = 'HEAD'
80 elif '#' in link.url:
81 dont_filter = True
82 callback = self.check_permalink
83 yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,
84 errback=self.error_callback)
85
86 def retry_request_with_get(self, request):
87 # type: (Request) -> Generator[Request, None, None]
88 request.method = 'GET'
89 request.dont_filter = True
90 yield request
91
92 def exclude_error(self, url):
93 # type: (str) -> bool
94 if url in EXCLUDED_URLS:
95 return True
96 return False
97
98 def error_callback(self, failure):
99 # type: (Any) -> Optional[Generator[Any, None, None]]
100 if hasattr(failure.value, 'response') and failure.value.response:
101 response = failure.value.response
102 if self.exclude_error(response.url):
103 return None
104 if response.status == 404:
105 self._set_error_state()
106 raise Exception('Page not found: {}'.format(response))
107 if response.status == 405 and response.request.method == 'HEAD':
108 # Method 'HEAD' not allowed, repeat request with 'GET'
109 return self.retry_request_with_get(response.request)
110 self.log("Error! Please check link: {}".format(response), logging.ERROR)
111 elif isinstance(failure.type, IOError):
112 self._set_error_state()
113 else:
114 raise Exception(failure.value)
115 return None
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
@@ -17,6 +17,8 @@
'https://calendar.google.com/calendar/[email protected]',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.com',
+ # Returns 404 to HEAD requests unconditionally
+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',
]
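The patch above works because the crawler's `error_callback` consults `exclude_error` before treating a failed response as fatal, so any URL added to `EXCLUDED_URLS` is skipped instead of setting the error state. A minimal standalone sketch of that lookup — the list is trimmed to the newly added entry purely for illustration:

```python
# Standalone sketch of the exclusion check; only the newly excluded URL is shown.
EXCLUDED_URLS = [
    # Returns 404 to HEAD requests unconditionally
    "https://www.git-tower.com/blog/command-line-cheat-sheet/",
]


def exclude_error(url):
    # type: (str) -> bool
    """Return True when a failing URL should not mark the crawl as broken."""
    return url in EXCLUDED_URLS


assert exclude_error("https://www.git-tower.com/blog/command-line-cheat-sheet/")
assert not exclude_error("https://zulip.org/some-page")
```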
|
{"golden_diff": "diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n@@ -17,6 +17,8 @@\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n+ # Returns 404 to HEAD requests unconditionally\n+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n ]\n", "issue": "Pull GCI docs into main Zulip repo.\nSome of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places.\r\n\r\ncc @synicalsyntax @lonerz @tommyip \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, 
method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n # Returns 404 to HEAD requests unconditionally\n 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' 
+ ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py"}]}
| 1,568 | 168 |
gh_patches_debug_28640
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-316
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "Launch in Hub" button to tutorials
We currently have an "Open in Colab" button for our tutorials. We should also have one for the Planetary Computer. See https://planetarycomputer.microsoft.com/dataset/landsat-8-c2-l2#Example-Notebook for an example of how this works.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 # Configuration file for the Sphinx documentation builder.
5 #
6 # This file only contains a selection of the most common options. For a full
7 # list see the documentation:
8 # https://www.sphinx-doc.org/en/master/usage/configuration.html
9
10 # -- Path setup --------------------------------------------------------------
11
12 import os
13 import sys
14
15 import pytorch_sphinx_theme
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.insert(0, os.path.abspath(".."))
21
22 import torchgeo # noqa: E402
23
24 # -- Project information -----------------------------------------------------
25
26 project = "torchgeo"
27 copyright = "2021, Microsoft Corporation"
28 author = torchgeo.__author__
29 version = ".".join(torchgeo.__version__.split(".")[:2])
30 release = torchgeo.__version__
31
32
33 # -- General configuration ---------------------------------------------------
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 "sphinx.ext.autodoc",
40 "sphinx.ext.intersphinx",
41 "sphinx.ext.napoleon",
42 "sphinx.ext.todo",
43 "sphinx.ext.viewcode",
44 "nbsphinx",
45 ]
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = ["_build"]
51
52 # Sphinx 3.0+ required for:
53 # autodoc_typehints = "description"
54 needs_sphinx = "3.0"
55
56 nitpicky = True
57 nitpick_ignore = [
58 # https://github.com/sphinx-doc/sphinx/issues/8127
59 ("py:class", ".."),
60 # TODO: can't figure out why this isn't found
61 ("py:class", "LightningDataModule"),
62 # Undocumented class
63 ("py:class", "torchvision.models.resnet.ResNet"),
64 ]
65
66
67 # -- Options for HTML output -------------------------------------------------
68
69 # The theme to use for HTML and HTML Help pages. See the documentation for
70 # a list of builtin themes.
71 html_theme = "pytorch_sphinx_theme"
72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
73
74 # Theme options are theme-specific and customize the look and feel of a theme
75 # further. For a list of options available for each theme, see the
76 # documentation.
77 html_theme_options = {
78 "collapse_navigation": False,
79 "display_version": True,
80 "logo_only": True,
81 "pytorch_project": "docs",
82 "navigation_with_keys": True,
83 "analytics_id": "UA-209075005-1",
84 }
85
86 html_favicon = os.path.join("..", "logo", "favicon.ico")
87
88 html_static_path = ["_static"]
89 html_css_files = ["workaround.css"]
90
91 # -- Extension configuration -------------------------------------------------
92
93 # sphinx.ext.autodoc
94 autodoc_default_options = {
95 "members": True,
96 "special-members": True,
97 "show-inheritance": True,
98 }
99 autodoc_member_order = "bysource"
100 autodoc_typehints = "description"
101
102 # sphinx.ext.intersphinx
103 intersphinx_mapping = {
104 "matplotlib": ("https://matplotlib.org/stable/", None),
105 "python": ("https://docs.python.org/3", None),
106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None),
107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None),
108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None),
109 "torch": ("https://pytorch.org/docs/stable", None),
110 "torchvision": ("https://pytorch.org/vision/stable", None),
111 }
112
113 # nbsphinx
114 nbsphinx_execute = "never"
115 # TODO: branch/tag should change depending on which version of docs you look at
116 # TODO: width option of image directive is broken, see:
117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140
118 nbsphinx_prolog = """
119 {% set colab = "https://colab.research.google.com" %}
120 {% set repo = "microsoft/torchgeo" %}
121 {% if "dev" in env.config.release %}
122 {% set branch = "main" %}
123 {% else %}
124 {% set branch = "releases/v" ~ env.config.version %}
125 {% endif %}
126
127 .. image:: {{ colab }}/assets/colab-badge.svg
128 :class: colabbadge
129 :alt: Open in Colab
130 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
131 """
132
133 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
134 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599
135 # NOTE: This will likely break nbsphinx widgets
136 nbsphinx_requirejs_path = ""
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -116,18 +116,35 @@
# TODO: width option of image directive is broken, see:
# https://github.com/pytorch/pytorch_sphinx_theme/issues/140
nbsphinx_prolog = """
-{% set colab = "https://colab.research.google.com" %}
+{% set host = "https://colab.research.google.com" %}
{% set repo = "microsoft/torchgeo" %}
+{% set urlpath = "docs/" ~ env.docname ~ ".ipynb" %}
{% if "dev" in env.config.release %}
{% set branch = "main" %}
{% else %}
{% set branch = "releases/v" ~ env.config.version %}
{% endif %}
-.. image:: {{ colab }}/assets/colab-badge.svg
+.. image:: {{ host }}/assets/colab-badge.svg
:class: colabbadge
:alt: Open in Colab
- :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
+ :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }}
+
+{% set host = "https://pccompute.westeurope.cloudapp.azure.com" %}
+{% set host = host ~ "/compute/hub/user-redirect/git-pull" %}
+{% set repo = "https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo" %}
+{% set urlpath = "tree%2Ftorchgeo%2Fdocs%2F" %}
+{% set urlpath = urlpath ~ env.docname | replace("/", "%2F") ~ ".ipynb" %}
+{% if "dev" in env.config.release %}
+ {% set branch = "main" %}
+{% else %}
+ {% set branch = "releases%2Fv" ~ env.config.version %}
+{% endif %}
+
+.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue
+ :class: colabbadge
+ :alt: Open on Planetary Computer
+ :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }}
"""
# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
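The Hub badge target added above is a JupyterHub `git-pull` redirect whose `repo`, `urlpath`, and `branch` query parameters are percent-encoded by hand in the Jinja template. A rough Python equivalent of what the template renders for a tagged release — the notebook name and version below are made-up examples, not values from the repository:

```python
# Rebuilds the Planetary Computer badge target outside Jinja for illustration.
from urllib.parse import quote

host = ("https://pccompute.westeurope.cloudapp.azure.com"
        "/compute/hub/user-redirect/git-pull")
repo = quote("https://github.com/microsoft/torchgeo", safe="")
docname = "tutorials/getting_started"          # assumed env.docname
urlpath = quote("tree/torchgeo/docs/" + docname + ".ipynb", safe="")
branch = quote("releases/v0.2", safe="")       # assumed release branch

print(f"{host}?repo={repo}&urlpath={urlpath}&branch={branch}")
```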
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -116,18 +116,35 @@\n # TODO: width option of image directive is broken, see:\n # https://github.com/pytorch/pytorch_sphinx_theme/issues/140\n nbsphinx_prolog = \"\"\"\n-{% set colab = \"https://colab.research.google.com\" %}\n+{% set host = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n+{% set urlpath = \"docs/\" ~ env.docname ~ \".ipynb\" %}\n {% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n {% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n {% endif %}\n \n-.. image:: {{ colab }}/assets/colab-badge.svg\n+.. image:: {{ host }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n- :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n+ :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }}\n+\n+{% set host = \"https://pccompute.westeurope.cloudapp.azure.com\" %}\n+{% set host = host ~ \"/compute/hub/user-redirect/git-pull\" %}\n+{% set repo = \"https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo\" %}\n+{% set urlpath = \"tree%2Ftorchgeo%2Fdocs%2F\" %}\n+{% set urlpath = urlpath ~ env.docname | replace(\"/\", \"%2F\") ~ \".ipynb\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases%2Fv\" ~ env.config.version %}\n+{% endif %}\n+\n+.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue\n+ :class: colabbadge\n+ :alt: Open on Planetary Computer\n+ :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }}\n \"\"\"\n \n # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n", "issue": "Add \"Launch in Hub\" button to tutorials\nWe currently have a \"Open in Colab\" button for our tutorials. We should also have one for the Planetary Computer. See https://planetarycomputer.microsoft.com/dataset/landsat-8-c2-l2#Example-Notebook for an example of how this works.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n{% endif %}\n\n.. 
image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set host = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% set urlpath = \"docs/\" ~ env.docname ~ \".ipynb\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n{% endif %}\n\n.. image:: {{ host }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }}\n\n{% set host = \"https://pccompute.westeurope.cloudapp.azure.com\" %}\n{% set host = host ~ \"/compute/hub/user-redirect/git-pull\" %}\n{% set repo = \"https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo\" %}\n{% set urlpath = \"tree%2Ftorchgeo%2Fdocs%2F\" %}\n{% set urlpath = urlpath ~ env.docname | replace(\"/\", \"%2F\") ~ \".ipynb\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases%2Fv\" ~ env.config.version %}\n{% endif %}\n\n.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue\n :class: colabbadge\n :alt: Open on Planetary Computer\n :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }}\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]}
| 1,767 | 542 |
gh_patches_debug_21843
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-824
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 31: ordinal not in range(128)
```
Traceback (most recent call last):
File "/home/orangepi/.sickrage/lib/subliminal/core.py", line 543, in refine
refiner_manager[refiner].plugin(video, **kwargs)
File "/home/orangepi/.sickrage/sickbeard/refiners/release.py", line 54, in refine
release_file = get_release_file(dirpath, fileroot, extension) or release_file
File "/home/orangepi/.sickrage/sickbeard/refiners/release.py", line 90, in get_release_file
if os.path.isfile(release_file):
File "/usr/lib/python2.7/genericpath.py", line 37, in isfile
st = os.stat(path)
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 31: ordinal not in range(128)
```
This happens because the subtitle contains a non-ascii char and my locale is set to None.
To fix this we would need to use ek() for isfile() or (better imo) use a similar solution to what we do in subtitles.py. What do you think @ratoaq2?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sickbeard/refiners/release.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Release refiner."""
3 from __future__ import unicode_literals
4
5 import logging
6 import os
7
8 from guessit import guessit
9
10 logger = logging.getLogger(__name__)
11
12 MOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',
13 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
14 EPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',
15 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',
16 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
17
18
19 def refine(video, release_name=None, release_file=None, extension='release', **kwargs):
20 """Refine a video by using the original release name.
21
22 The refiner will first try:
23 - Read the file video_name.<extension> seeking for a release name
24 - If no release name, it will read the release_file seeking for a release name
25 - If no release name, it will use the release_name passed as an argument
26 - If no release name, then no change in the video object is made
27
28 When a release name is found, the video object will be enhanced using the guessit properties extracted from it.
29
30 Several :class:`~subliminal.video.Video` attributes can be found:
31
32 * :attr:`~subliminal.video.Video.title`
33 * :attr:`~subliminal.video.Video.series`
34 * :attr:`~subliminal.video.Video.season`
35 * :attr:`~subliminal.video.Video.episode`
36 * :attr:`~subliminal.video.Video.year`
37 * :attr:`~subliminal.video.Video.format`
38 * :attr:`~subliminal.video.Video.release_group`
39 * :attr:`~subliminal.video.Video.resolution`
40 * :attr:`~subliminal.video.Video.video_codec`
41 * :attr:`~subliminal.video.Video.audio_codec`
42
43 :param video: the video to refine.
44 :type video: subliminal.video.Video
45 :param str release_name: the release name to be used.
46 :param str release_file: the release file to be used
47 :param str extension: the release file extension.
48 """
49 logger.debug('Starting release refiner [extension={extension}, release_name={name}, release_file={file}]',
50 extension=extension, name=release_name, file=release_file)
51 dirpath, filename = os.path.split(video.name)
52 dirpath = dirpath or '.'
53 fileroot, fileext = os.path.splitext(filename)
54 release_file = get_release_file(dirpath, fileroot, extension) or release_file
55 release_name = get_release_name(release_file) or release_name
56
57 if not release_name:
58 logger.debug('No release name for {video}', video=video.name)
59 return
60
61 release_path = os.path.join(dirpath, release_name + fileext)
62 logger.debug('Guessing using {path}', path=release_path)
63
64 guess = guessit(release_path)
65 attributes = MOVIE_ATTRIBUTES if guess.get('type') == 'movie' else EPISODE_ATTRIBUTES
66 for key, value in attributes.items():
67 old_value = getattr(video, key)
68 new_value = guess.get(value)
69
70 if new_value and old_value != new_value:
71 setattr(video, key, new_value)
72 logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)
73
74
75 def get_release_file(dirpath, filename, extension):
76 """Return the release file that should contain the release name for a given a `dirpath`, `filename` and `extension`.
77
78 :param dirpath: the file base folder
79 :type dirpath: str
80 :param filename: the file name without extension
81 :type filename: str
82 :param extension:
83 :type extension: the file extension
84 :return: the release file if the file exists
85 :rtype: str
86 """
87 release_file = os.path.join(dirpath, filename + '.' + extension)
88
89 # skip if info file doesn't exist
90 if os.path.isfile(release_file):
91 logger.debug('Found release file {file}', file=release_file)
92 return release_file
93
94
95 def get_release_name(release_file):
96 """Given a `release_file` it will return the release name.
97
98 :param release_file: the text file that contains the release name
99 :type release_file: str
100 :return: the release name
101 :rtype: str
102 """
103 if not release_file:
104 return
105
106 with open(release_file, 'r') as f:
107 release_name = f.read().strip()
108
109 # skip if no release name was found
110 if not release_name:
111 logger.warning('Release file {file} does not contain a release name', file=release_file)
112
113 return release_name
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sickbeard/refiners/release.py b/sickbeard/refiners/release.py
--- a/sickbeard/refiners/release.py
+++ b/sickbeard/refiners/release.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""Release refiner."""
-from __future__ import unicode_literals
import logging
import os
@@ -10,7 +9,7 @@
logger = logging.getLogger(__name__)
MOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',
- 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
+ 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
EPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',
'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',
'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}
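For context on the traceback: under Python 2 with an unset locale, `os.stat()` encodes a `unicode` path with the `ascii` codec before the system call, and that implicit encode is exactly the step that fails on characters such as `ō` (U+014D). A small standalone illustration of that step — the filename is invented, and the explicit encode below reproduces the same error under Python 3:

```python
# Reproduces the encode that Python 2's os.stat() performed implicitly when the
# locale (and therefore the filesystem encoding) fell back to ASCII.
path = u"Sh\u014dgun.S01E01.1080p.release"  # invented name containing U+014D ('ō')

try:
    path.encode("ascii")  # the implicit step inside os.stat() on Python 2
except UnicodeEncodeError as exc:
    print("ASCII filesystem encoding cannot represent the path:", exc)

# With a UTF-8 filesystem encoding the same path encodes without error.
print(path.encode("utf-8"))
```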
|
{"golden_diff": "diff --git a/sickbeard/refiners/release.py b/sickbeard/refiners/release.py\n--- a/sickbeard/refiners/release.py\n+++ b/sickbeard/refiners/release.py\n@@ -1,6 +1,5 @@\n # -*- coding: utf-8 -*-\n \"\"\"Release refiner.\"\"\"\n-from __future__ import unicode_literals\n \n import logging\n import os\n@@ -10,7 +9,7 @@\n logger = logging.getLogger(__name__)\n \n MOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',\n- 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n+ 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n EPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',\n 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',\n 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n", "issue": "UnicodeEncodeError: 'ascii' codec can't encode character u'\\u014d' in position 31: ordinal not in range(128)\n```\nTraceback (most recent call last):\n File \"/home/orangepi/.sickrage/lib/subliminal/core.py\", line 543, in refine\n refiner_manager[refiner].plugin(video, **kwargs)\n File \"/home/orangepi/.sickrage/sickbeard/refiners/release.py\", line 54, in refine\n release_file = get_release_file(dirpath, fileroot, extension) or release_file\n File \"/home/orangepi/.sickrage/sickbeard/refiners/release.py\", line 90, in get_release_file\n if os.path.isfile(release_file):\n File \"/usr/lib/python2.7/genericpath.py\", line 37, in isfile\n st = os.stat(path)\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\u014d' in position 31: ordinal not in range(128)\n```\n\nThis happens because the subtitle contains a non-ascii char and my locale is set to None.\nTo fix this we would need to use ek() for isfile() or (better imo) use a similar solution to what we du in subtitles.py. 
What do you think @ratoaq2?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Release refiner.\"\"\"\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom guessit import guessit\n\nlogger = logging.getLogger(__name__)\n\nMOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',\n 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\nEPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',\n 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',\n 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n\n\ndef refine(video, release_name=None, release_file=None, extension='release', **kwargs):\n \"\"\"Refine a video by using the original release name.\n\n The refiner will first try:\n - Read the file video_name.<extension> seeking for a release name\n - If no release name, it will read the release_file seeking for a release name\n - If no release name, it will use the release_name passed as an argument\n - If no release name, then no change in the video object is made\n\n When a release name is found, the video object will be enhanced using the guessit properties extracted from it.\n\n Several :class:`~subliminal.video.Video` attributes can be found:\n\n * :attr:`~subliminal.video.Video.title`\n * :attr:`~subliminal.video.Video.series`\n * :attr:`~subliminal.video.Video.season`\n * :attr:`~subliminal.video.Video.episode`\n * :attr:`~subliminal.video.Video.year`\n * :attr:`~subliminal.video.Video.format`\n * :attr:`~subliminal.video.Video.release_group`\n * :attr:`~subliminal.video.Video.resolution`\n * :attr:`~subliminal.video.Video.video_codec`\n * :attr:`~subliminal.video.Video.audio_codec`\n\n :param video: the video to refine.\n :type video: subliminal.video.Video\n :param str release_name: the release name to be used.\n :param str release_file: the release file to be used\n :param str extension: the release file extension.\n \"\"\"\n logger.debug('Starting release refiner [extension={extension}, release_name={name}, release_file={file}]',\n extension=extension, name=release_name, file=release_file)\n dirpath, filename = os.path.split(video.name)\n dirpath = dirpath or '.'\n fileroot, fileext = os.path.splitext(filename)\n release_file = get_release_file(dirpath, fileroot, extension) or release_file\n release_name = get_release_name(release_file) or release_name\n\n if not release_name:\n logger.debug('No release name for {video}', video=video.name)\n return\n\n release_path = os.path.join(dirpath, release_name + fileext)\n logger.debug('Guessing using {path}', path=release_path)\n\n guess = guessit(release_path)\n attributes = MOVIE_ATTRIBUTES if guess.get('type') == 'movie' else EPISODE_ATTRIBUTES\n for key, value in attributes.items():\n old_value = getattr(video, key)\n new_value = guess.get(value)\n\n if new_value and old_value != new_value:\n setattr(video, key, new_value)\n logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)\n\n\ndef get_release_file(dirpath, filename, extension):\n \"\"\"Return the release file that should contain the release name for a given a `dirpath`, `filename` and `extension`.\n\n :param dirpath: the file base folder\n :type dirpath: str\n :param filename: the file name without extension\n :type filename: str\n :param extension:\n :type extension: the file extension\n :return: the 
release file if the file exists\n :rtype: str\n \"\"\"\n release_file = os.path.join(dirpath, filename + '.' + extension)\n\n # skip if info file doesn't exist\n if os.path.isfile(release_file):\n logger.debug('Found release file {file}', file=release_file)\n return release_file\n\n\ndef get_release_name(release_file):\n \"\"\"Given a `release_file` it will return the release name.\n\n :param release_file: the text file that contains the release name\n :type release_file: str\n :return: the release name\n :rtype: str\n \"\"\"\n if not release_file:\n return\n\n with open(release_file, 'r') as f:\n release_name = f.read().strip()\n\n # skip if no release name was found\n if not release_name:\n logger.warning('Release file {file} does not contain a release name', file=release_file)\n\n return release_name\n", "path": "sickbeard/refiners/release.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Release refiner.\"\"\"\n\nimport logging\nimport os\n\nfrom guessit import guessit\n\nlogger = logging.getLogger(__name__)\n\nMOVIE_ATTRIBUTES = {'title': 'title', 'year': 'year', 'format': 'format', 'release_group': 'release_group',\n 'resolution': 'screen_size', 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\nEPISODE_ATTRIBUTES = {'series': 'title', 'season': 'season', 'episode': 'episode', 'title': 'episode_title',\n 'year': 'year', 'format': 'format', 'release_group': 'release_group', 'resolution': 'screen_size',\n 'video_codec': 'video_codec', 'audio_codec': 'audio_codec'}\n\n\ndef refine(video, release_name=None, release_file=None, extension='release', **kwargs):\n \"\"\"Refine a video by using the original release name.\n\n The refiner will first try:\n - Read the file video_name.<extension> seeking for a release name\n - If no release name, it will read the release_file seeking for a release name\n - If no release name, it will use the release_name passed as an argument\n - If no release name, then no change in the video object is made\n\n When a release name is found, the video object will be enhanced using the guessit properties extracted from it.\n\n Several :class:`~subliminal.video.Video` attributes can be found:\n\n * :attr:`~subliminal.video.Video.title`\n * :attr:`~subliminal.video.Video.series`\n * :attr:`~subliminal.video.Video.season`\n * :attr:`~subliminal.video.Video.episode`\n * :attr:`~subliminal.video.Video.year`\n * :attr:`~subliminal.video.Video.format`\n * :attr:`~subliminal.video.Video.release_group`\n * :attr:`~subliminal.video.Video.resolution`\n * :attr:`~subliminal.video.Video.video_codec`\n * :attr:`~subliminal.video.Video.audio_codec`\n\n :param video: the video to refine.\n :type video: subliminal.video.Video\n :param str release_name: the release name to be used.\n :param str release_file: the release file to be used\n :param str extension: the release file extension.\n \"\"\"\n logger.debug('Starting release refiner [extension={extension}, release_name={name}, release_file={file}]',\n extension=extension, name=release_name, file=release_file)\n dirpath, filename = os.path.split(video.name)\n dirpath = dirpath or '.'\n fileroot, fileext = os.path.splitext(filename)\n release_file = get_release_file(dirpath, fileroot, extension) or release_file\n release_name = get_release_name(release_file) or release_name\n\n if not release_name:\n logger.debug('No release name for {video}', video=video.name)\n return\n\n release_path = os.path.join(dirpath, release_name + fileext)\n logger.debug('Guessing using {path}', path=release_path)\n\n guess = 
guessit(release_path)\n attributes = MOVIE_ATTRIBUTES if guess.get('type') == 'movie' else EPISODE_ATTRIBUTES\n for key, value in attributes.items():\n old_value = getattr(video, key)\n new_value = guess.get(value)\n\n if new_value and old_value != new_value:\n setattr(video, key, new_value)\n logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)\n\n\ndef get_release_file(dirpath, filename, extension):\n \"\"\"Return the release file that should contain the release name for a given a `dirpath`, `filename` and `extension`.\n\n :param dirpath: the file base folder\n :type dirpath: str\n :param filename: the file name without extension\n :type filename: str\n :param extension:\n :type extension: the file extension\n :return: the release file if the file exists\n :rtype: str\n \"\"\"\n release_file = os.path.join(dirpath, filename + '.' + extension)\n\n # skip if info file doesn't exist\n if os.path.isfile(release_file):\n logger.debug('Found release file {file}', file=release_file)\n return release_file\n\n\ndef get_release_name(release_file):\n \"\"\"Given a `release_file` it will return the release name.\n\n :param release_file: the text file that contains the release name\n :type release_file: str\n :return: the release name\n :rtype: str\n \"\"\"\n if not release_file:\n return\n\n with open(release_file, 'r') as f:\n release_name = f.read().strip()\n\n # skip if no release name was found\n if not release_name:\n logger.warning('Release file {file} does not contain a release name', file=release_file)\n\n return release_name\n", "path": "sickbeard/refiners/release.py"}]}
| 1,896 | 255 |
gh_patches_debug_4872
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-8517
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/displayname returns non-JSON body
### Description
An attempt to call `/profile/{}/displayname` with an empty body results (rightfully) in a 400 error, with plain text "Unable to parse name" in the response. The expected error payload is something about (could also be `M_NOT_JSON` instead of `M_BAD_JSON`):
```json
{
"errcode": "M_BAD_JSON",
"error": "Unable to parse name"
}
```
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**: matrix.org
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/rest/client/v1/profile.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2014-2016 OpenMarket Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """ This module contains REST servlets to do with profile: /profile/<paths> """
17
18 from synapse.api.errors import Codes, SynapseError
19 from synapse.http.servlet import RestServlet, parse_json_object_from_request
20 from synapse.rest.client.v2_alpha._base import client_patterns
21 from synapse.types import UserID
22
23
24 class ProfileDisplaynameRestServlet(RestServlet):
25 PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)
26
27 def __init__(self, hs):
28 super().__init__()
29 self.hs = hs
30 self.profile_handler = hs.get_profile_handler()
31 self.auth = hs.get_auth()
32
33 async def on_GET(self, request, user_id):
34 requester_user = None
35
36 if self.hs.config.require_auth_for_profile_requests:
37 requester = await self.auth.get_user_by_req(request)
38 requester_user = requester.user
39
40 user = UserID.from_string(user_id)
41
42 await self.profile_handler.check_profile_query_allowed(user, requester_user)
43
44 displayname = await self.profile_handler.get_displayname(user)
45
46 ret = {}
47 if displayname is not None:
48 ret["displayname"] = displayname
49
50 return 200, ret
51
52 async def on_PUT(self, request, user_id):
53 requester = await self.auth.get_user_by_req(request, allow_guest=True)
54 user = UserID.from_string(user_id)
55 is_admin = await self.auth.is_server_admin(requester.user)
56
57 content = parse_json_object_from_request(request)
58
59 try:
60 new_name = content["displayname"]
61 except Exception:
62 return 400, "Unable to parse name"
63
64 await self.profile_handler.set_displayname(user, requester, new_name, is_admin)
65
66 return 200, {}
67
68 def on_OPTIONS(self, request, user_id):
69 return 200, {}
70
71
72 class ProfileAvatarURLRestServlet(RestServlet):
73 PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)
74
75 def __init__(self, hs):
76 super().__init__()
77 self.hs = hs
78 self.profile_handler = hs.get_profile_handler()
79 self.auth = hs.get_auth()
80
81 async def on_GET(self, request, user_id):
82 requester_user = None
83
84 if self.hs.config.require_auth_for_profile_requests:
85 requester = await self.auth.get_user_by_req(request)
86 requester_user = requester.user
87
88 user = UserID.from_string(user_id)
89
90 await self.profile_handler.check_profile_query_allowed(user, requester_user)
91
92 avatar_url = await self.profile_handler.get_avatar_url(user)
93
94 ret = {}
95 if avatar_url is not None:
96 ret["avatar_url"] = avatar_url
97
98 return 200, ret
99
100 async def on_PUT(self, request, user_id):
101 requester = await self.auth.get_user_by_req(request)
102 user = UserID.from_string(user_id)
103 is_admin = await self.auth.is_server_admin(requester.user)
104
105 content = parse_json_object_from_request(request)
106 try:
107 new_avatar_url = content["avatar_url"]
108 except KeyError:
109 raise SynapseError(
110 400, "Missing key 'avatar_url'", errcode=Codes.MISSING_PARAM
111 )
112
113 await self.profile_handler.set_avatar_url(
114 user, requester, new_avatar_url, is_admin
115 )
116
117 return 200, {}
118
119 def on_OPTIONS(self, request, user_id):
120 return 200, {}
121
122
123 class ProfileRestServlet(RestServlet):
124 PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
125
126 def __init__(self, hs):
127 super().__init__()
128 self.hs = hs
129 self.profile_handler = hs.get_profile_handler()
130 self.auth = hs.get_auth()
131
132 async def on_GET(self, request, user_id):
133 requester_user = None
134
135 if self.hs.config.require_auth_for_profile_requests:
136 requester = await self.auth.get_user_by_req(request)
137 requester_user = requester.user
138
139 user = UserID.from_string(user_id)
140
141 await self.profile_handler.check_profile_query_allowed(user, requester_user)
142
143 displayname = await self.profile_handler.get_displayname(user)
144 avatar_url = await self.profile_handler.get_avatar_url(user)
145
146 ret = {}
147 if displayname is not None:
148 ret["displayname"] = displayname
149 if avatar_url is not None:
150 ret["avatar_url"] = avatar_url
151
152 return 200, ret
153
154
155 def register_servlets(hs, http_server):
156 ProfileDisplaynameRestServlet(hs).register(http_server)
157 ProfileAvatarURLRestServlet(hs).register(http_server)
158 ProfileRestServlet(hs).register(http_server)
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -59,7 +59,9 @@
try:
new_name = content["displayname"]
except Exception:
- return 400, "Unable to parse name"
+ raise SynapseError(
+ code=400, msg="Unable to parse name", errcode=Codes.BAD_JSON,
+ )
await self.profile_handler.set_displayname(user, requester, new_name, is_admin)
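With the `SynapseError` in place, the servlet's failure path goes through Synapse's standard JSON error serialisation instead of returning a bare `(400, str)` tuple, so the client receives the machine-readable body the issue asks for. A sketch of the before/after response bodies — shapes assumed from the error-code constants, not captured from a live homeserver:

```python
import json

# Before the patch: a 400 response with a plain-text body.
old_status, old_body = 400, "Unable to parse name"

# After the patch: Codes.BAD_JSON is the string "M_BAD_JSON", and the raised
# SynapseError is serialised into the standard Matrix error object.
new_status = 400
new_body = {"errcode": "M_BAD_JSON", "error": "Unable to parse name"}

print(old_status, old_body)
print(new_status, json.dumps(new_body))
```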
|
{"golden_diff": "diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py\n--- a/synapse/rest/client/v1/profile.py\n+++ b/synapse/rest/client/v1/profile.py\n@@ -59,7 +59,9 @@\n try:\n new_name = content[\"displayname\"]\n except Exception:\n- return 400, \"Unable to parse name\"\n+ raise SynapseError(\n+ code=400, msg=\"Unable to parse name\", errcode=Codes.BAD_JSON,\n+ )\n \n await self.profile_handler.set_displayname(user, requester, new_name, is_admin)\n", "issue": "/displayname returns non-JSON body\n### Description\r\n\r\nAn attempt to call `/profile/{}/displayname` with an empty body results (rightfully) in a 400 error, with plain text \"Unable to parse name\" in the response. The expected error payload is something about (could also be `M_NOT_JSON` instead of `M_BAD_JSON`):\r\n```json\r\n{\r\n \"errcode\": \"M_BAD_JSON\",\r\n \"error\": \"Unable to parse name\" \r\n}\r\n```\r\n\r\n### Version information\r\n\r\n<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->\r\n\r\n<!-- Was this issue identified on matrix.org or another homeserver? -->\r\n- **Homeserver**: matrix.org\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" This module contains REST servlets to do with profile: /profile/<paths> \"\"\"\n\nfrom synapse.api.errors import Codes, SynapseError\nfrom synapse.http.servlet import RestServlet, parse_json_object_from_request\nfrom synapse.rest.client.v2_alpha._base import client_patterns\nfrom synapse.types import UserID\n\n\nclass ProfileDisplaynameRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)/displayname\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n displayname = await self.profile_handler.get_displayname(user)\n\n ret = {}\n if displayname is not None:\n ret[\"displayname\"] = displayname\n\n return 200, ret\n\n async def on_PUT(self, request, user_id):\n requester = await self.auth.get_user_by_req(request, allow_guest=True)\n user = UserID.from_string(user_id)\n is_admin = await self.auth.is_server_admin(requester.user)\n\n content = parse_json_object_from_request(request)\n\n try:\n new_name = content[\"displayname\"]\n except Exception:\n return 400, \"Unable to parse name\"\n\n await self.profile_handler.set_displayname(user, requester, new_name, is_admin)\n\n return 200, {}\n\n def on_OPTIONS(self, request, user_id):\n return 200, {}\n\n\nclass ProfileAvatarURLRestServlet(RestServlet):\n PATTERNS = 
client_patterns(\"/profile/(?P<user_id>[^/]*)/avatar_url\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n avatar_url = await self.profile_handler.get_avatar_url(user)\n\n ret = {}\n if avatar_url is not None:\n ret[\"avatar_url\"] = avatar_url\n\n return 200, ret\n\n async def on_PUT(self, request, user_id):\n requester = await self.auth.get_user_by_req(request)\n user = UserID.from_string(user_id)\n is_admin = await self.auth.is_server_admin(requester.user)\n\n content = parse_json_object_from_request(request)\n try:\n new_avatar_url = content[\"avatar_url\"]\n except KeyError:\n raise SynapseError(\n 400, \"Missing key 'avatar_url'\", errcode=Codes.MISSING_PARAM\n )\n\n await self.profile_handler.set_avatar_url(\n user, requester, new_avatar_url, is_admin\n )\n\n return 200, {}\n\n def on_OPTIONS(self, request, user_id):\n return 200, {}\n\n\nclass ProfileRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n displayname = await self.profile_handler.get_displayname(user)\n avatar_url = await self.profile_handler.get_avatar_url(user)\n\n ret = {}\n if displayname is not None:\n ret[\"displayname\"] = displayname\n if avatar_url is not None:\n ret[\"avatar_url\"] = avatar_url\n\n return 200, ret\n\n\ndef register_servlets(hs, http_server):\n ProfileDisplaynameRestServlet(hs).register(http_server)\n ProfileAvatarURLRestServlet(hs).register(http_server)\n ProfileRestServlet(hs).register(http_server)\n", "path": "synapse/rest/client/v1/profile.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" This module contains REST servlets to do with profile: /profile/<paths> \"\"\"\n\nfrom synapse.api.errors import Codes, SynapseError\nfrom synapse.http.servlet import RestServlet, parse_json_object_from_request\nfrom synapse.rest.client.v2_alpha._base import client_patterns\nfrom synapse.types import UserID\n\n\nclass ProfileDisplaynameRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)/displayname\", v1=True)\n\n def 
__init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n displayname = await self.profile_handler.get_displayname(user)\n\n ret = {}\n if displayname is not None:\n ret[\"displayname\"] = displayname\n\n return 200, ret\n\n async def on_PUT(self, request, user_id):\n requester = await self.auth.get_user_by_req(request, allow_guest=True)\n user = UserID.from_string(user_id)\n is_admin = await self.auth.is_server_admin(requester.user)\n\n content = parse_json_object_from_request(request)\n\n try:\n new_name = content[\"displayname\"]\n except Exception:\n raise SynapseError(\n code=400, msg=\"Unable to parse name\", errcode=Codes.BAD_JSON,\n )\n\n await self.profile_handler.set_displayname(user, requester, new_name, is_admin)\n\n return 200, {}\n\n def on_OPTIONS(self, request, user_id):\n return 200, {}\n\n\nclass ProfileAvatarURLRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)/avatar_url\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n avatar_url = await self.profile_handler.get_avatar_url(user)\n\n ret = {}\n if avatar_url is not None:\n ret[\"avatar_url\"] = avatar_url\n\n return 200, ret\n\n async def on_PUT(self, request, user_id):\n requester = await self.auth.get_user_by_req(request)\n user = UserID.from_string(user_id)\n is_admin = await self.auth.is_server_admin(requester.user)\n\n content = parse_json_object_from_request(request)\n try:\n new_avatar_url = content[\"avatar_url\"]\n except KeyError:\n raise SynapseError(\n 400, \"Missing key 'avatar_url'\", errcode=Codes.MISSING_PARAM\n )\n\n await self.profile_handler.set_avatar_url(\n user, requester, new_avatar_url, is_admin\n )\n\n return 200, {}\n\n def on_OPTIONS(self, request, user_id):\n return 200, {}\n\n\nclass ProfileRestServlet(RestServlet):\n PATTERNS = client_patterns(\"/profile/(?P<user_id>[^/]*)\", v1=True)\n\n def __init__(self, hs):\n super().__init__()\n self.hs = hs\n self.profile_handler = hs.get_profile_handler()\n self.auth = hs.get_auth()\n\n async def on_GET(self, request, user_id):\n requester_user = None\n\n if self.hs.config.require_auth_for_profile_requests:\n requester = await self.auth.get_user_by_req(request)\n requester_user = requester.user\n\n user = UserID.from_string(user_id)\n\n await self.profile_handler.check_profile_query_allowed(user, requester_user)\n\n displayname = await self.profile_handler.get_displayname(user)\n avatar_url = await self.profile_handler.get_avatar_url(user)\n\n ret = {}\n if displayname is not None:\n ret[\"displayname\"] = displayname\n if avatar_url is not None:\n ret[\"avatar_url\"] = avatar_url\n\n return 200, ret\n\n\ndef register_servlets(hs, http_server):\n 
ProfileDisplaynameRestServlet(hs).register(http_server)\n ProfileAvatarURLRestServlet(hs).register(http_server)\n ProfileRestServlet(hs).register(http_server)\n", "path": "synapse/rest/client/v1/profile.py"}]}
| 1,963 | 141 |
gh_patches_debug_12430
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-2065
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IOError, 'Not a gzipped file'
while trying to access a sitemap from robots.txt, Scrapy fails with an **IOError, 'Not a gzipped file'** error
not sure if this issue is related to the following issue(s)
https://github.com/scrapy/scrapy/issues/193 -> closed issue
https://github.com/scrapy/scrapy/pull/660 -> merged pull request to address issue 193
https://github.com/scrapy/scrapy/issues/951 -> open issue
> line where the code fails, in gzip.py at line #197
>
> ``` python
> def _read_gzip_header(self):
> magic = self.fileobj.read(2)
> if magic != '\037\213':
> raise IOError, 'Not a gzipped file'
> ```
# Response Header
```
Content-Encoding: gzip
Accept-Ranges: bytes
X-Amz-Request-Id: BFFF010DDE6268DA
Vary: Accept-Encoding
Server: AmazonS3
Last-Modified: Wed, 15 Jun 2016 19:02:20 GMT
Etag: "300bb71d6897cb2a22bba0bd07978c84"
Cache-Control: no-transform
Date: Sun, 19 Jun 2016 10:54:53 GMT
Content-Type: binary/octet-stream
```
Error Log:
``` log
Traceback (most recent call last):
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 29, in process_spider_output
for x in result:
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "D:\projects\sitemap_spider\sitemap_spider\spiders\mainspider.py", line 31, in _parse_sitemap
body = self._get_sitemap_body(response)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spiders\sitemap.py", line 67, in _get_sitemap_body
return gunzip(response.body)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\gz.py", line 37, in gunzip
chunk = read1(f, 8196)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\gz.py", line 21, in read1
return gzf.read(size)
File "c:\python27\Lib\gzip.py", line 268, in read
self._read(readsize)
File "c:\python27\Lib\gzip.py", line 303, in _read
self._read_gzip_header()
File "c:\python27\Lib\gzip.py", line 197, in _read_gzip_header
raise IOError, 'Not a gzipped file'
```
I did download the file manually and was able to extract the content, so it is not as if the file is corrupted.
As an example sitemap URL, you can follow Amazon's robots.txt.
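One possible relaxation of the check in `scrapy/utils/gz.py`, sketched here as an assumption rather than the final fix, is to also treat octet-stream responses whose `Content-Encoding` says gzip as gzipped:

``` python
import re

_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search


def is_gzipped(response):
    # Accept binary/octet-stream + Content-Encoding: gzip (as in the headers
    # above) instead of relying on Content-Type alone.
    ctype = response.headers.get('Content-Type', b'')
    cenc = response.headers.get('Content-Encoding', b'').lower()
    return bool(_is_gzipped(ctype) or
                (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
```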
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/gz.py`
Content:
```
1 import struct
2
3 try:
4 from cStringIO import StringIO as BytesIO
5 except ImportError:
6 from io import BytesIO
7 from gzip import GzipFile
8
9 import six
10 import re
11
12 # - Python>=3.5 GzipFile's read() has issues returning leftover
13 # uncompressed data when input is corrupted
14 # (regression or bug-fix compared to Python 3.4)
15 # - read1(), which fetches data before raising EOFError on next call
16 # works here but is only available from Python>=3.3
17 # - scrapy does not support Python 3.2
18 # - Python 2.7 GzipFile works fine with standard read() + extrabuf
19 if six.PY2:
20 def read1(gzf, size=-1):
21 return gzf.read(size)
22 else:
23 def read1(gzf, size=-1):
24 return gzf.read1(size)
25
26
27 def gunzip(data):
28 """Gunzip the given data and return as much data as possible.
29
30 This is resilient to CRC checksum errors.
31 """
32 f = GzipFile(fileobj=BytesIO(data))
33 output = b''
34 chunk = b'.'
35 while chunk:
36 try:
37 chunk = read1(f, 8196)
38 output += chunk
39 except (IOError, EOFError, struct.error):
40 # complete only if there is some data, otherwise re-raise
41 # see issue 87 about catching struct.error
42 # some pages are quite small so output is '' and f.extrabuf
43 # contains the whole page content
44 if output or getattr(f, 'extrabuf', None):
45 try:
46 output += f.extrabuf
47 finally:
48 break
49 else:
50 raise
51 return output
52
53 _is_gzipped_re = re.compile(br'^application/(x-)?gzip\b', re.I)
54
55 def is_gzipped(response):
56 """Return True if the response is gzipped, or False otherwise"""
57 ctype = response.headers.get('Content-Type', b'')
58 return _is_gzipped_re.search(ctype) is not None
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py
--- a/scrapy/utils/gz.py
+++ b/scrapy/utils/gz.py
@@ -50,9 +50,12 @@
raise
return output
-_is_gzipped_re = re.compile(br'^application/(x-)?gzip\b', re.I)
+_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
+_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
- return _is_gzipped_re.search(ctype) is not None
+ cenc = response.headers.get('Content-Encoding', b'').lower()
+ return (_is_gzipped(ctype) or
+ (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
|
{"golden_diff": "diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py\n--- a/scrapy/utils/gz.py\n+++ b/scrapy/utils/gz.py\n@@ -50,9 +50,12 @@\n raise\n return output\n \n-_is_gzipped_re = re.compile(br'^application/(x-)?gzip\\b', re.I)\n+_is_gzipped = re.compile(br'^application/(x-)?gzip\\b', re.I).search\n+_is_octetstream = re.compile(br'^(application|binary)/octet-stream\\b', re.I).search\n \n def is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n- return _is_gzipped_re.search(ctype) is not None\n+ cenc = response.headers.get('Content-Encoding', b'').lower()\n+ return (_is_gzipped(ctype) or\n+ (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))\n", "issue": "IOError, 'Not a gzipped file'\nwhile trying to access sitemap from robots.txt , Scrapy fails with **IOError, 'Not a gzipped file'** error\n\nnot sure if this issue is related to following issue(s)\nhttps://github.com/scrapy/scrapy/issues/193 -> closed issue\nhttps://github.com/scrapy/scrapy/pull/660 -> merged pull request to address issue 193\nhttps://github.com/scrapy/scrapy/issues/951 -> open issue\n\n> line where code fails in gzip.py at line # 197\n> \n> ``` python\n> def _read_gzip_header(self):\n> magic = self.fileobj.read(2)\n> if magic != '\\037\\213':\n> raise IOError, 'Not a gzipped file'\n> ```\n# Response Header\n\n```\nContent-Encoding: gzip\nAccept-Ranges: bytes\nX-Amz-Request-Id: BFFF010DDE6268DA\nVary: Accept-Encoding\nServer: AmazonS3\nLast-Modified: Wed, 15 Jun 2016 19:02:20 GMT\nEtag: \"300bb71d6897cb2a22bba0bd07978c84\"\nCache-Control: no-transform\nDate: Sun, 19 Jun 2016 10:54:53 GMT\nContent-Type: binary/octet-stream\n```\n\nError Log:\n\n``` log\n Traceback (most recent call last):\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\utils\\defer.py\", line 102, in iter_errback\n yield next(it)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\offsite.py\", line 29, in process_spider_output\n for x in result:\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\referer.py\", line 22, in <genexpr>\n return (_set_referer(r) for r in result or ())\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\urllength.py\", line 37, in <genexpr>\n return (r for r in result or () if _filter(r))\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spidermiddlewares\\depth.py\", line 58, in <genexpr>\n return (r for r in result or () if _filter(r))\n File \"D:\\projects\\sitemap_spider\\sitemap_spider\\spiders\\mainspider.py\", line 31, in _parse_sitemap\n body = self._get_sitemap_body(response)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\spiders\\sitemap.py\", line 67, in _get_sitemap_body\n return gunzip(response.body)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\utils\\gz.py\", line 37, in gunzip\n chunk = read1(f, 8196)\n File \"c:\\venv\\scrapy1.0\\lib\\site-packages\\scrapy\\utils\\gz.py\", line 21, in read1\n return gzf.read(size)\n File \"c:\\python27\\Lib\\gzip.py\", line 268, in read\n self._read(readsize)\n File \"c:\\python27\\Lib\\gzip.py\", line 303, in _read\n self._read_gzip_header()\n File \"c:\\python27\\Lib\\gzip.py\", line 197, in _read_gzip_header\n raise IOError, 'Not a gzipped file'\n```\n\ni did download file manually and was able to extract the content so it is not like file is corrupted \n\nas an example sitemap url : you can follow amazon robots.txt\n\n", "before_files": [{"content": 
"import struct\n\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\nfrom gzip import GzipFile\n\nimport six\nimport re\n\n# - Python>=3.5 GzipFile's read() has issues returning leftover\n# uncompressed data when input is corrupted\n# (regression or bug-fix compared to Python 3.4)\n# - read1(), which fetches data before raising EOFError on next call\n# works here but is only available from Python>=3.3\n# - scrapy does not support Python 3.2\n# - Python 2.7 GzipFile works fine with standard read() + extrabuf\nif six.PY2:\n def read1(gzf, size=-1):\n return gzf.read(size)\nelse:\n def read1(gzf, size=-1):\n return gzf.read1(size)\n\n\ndef gunzip(data):\n \"\"\"Gunzip the given data and return as much data as possible.\n\n This is resilient to CRC checksum errors.\n \"\"\"\n f = GzipFile(fileobj=BytesIO(data))\n output = b''\n chunk = b'.'\n while chunk:\n try:\n chunk = read1(f, 8196)\n output += chunk\n except (IOError, EOFError, struct.error):\n # complete only if there is some data, otherwise re-raise\n # see issue 87 about catching struct.error\n # some pages are quite small so output is '' and f.extrabuf\n # contains the whole page content\n if output or getattr(f, 'extrabuf', None):\n try:\n output += f.extrabuf\n finally:\n break\n else:\n raise\n return output\n\n_is_gzipped_re = re.compile(br'^application/(x-)?gzip\\b', re.I)\n\ndef is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n return _is_gzipped_re.search(ctype) is not None\n", "path": "scrapy/utils/gz.py"}], "after_files": [{"content": "import struct\n\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\nfrom gzip import GzipFile\n\nimport six\nimport re\n\n# - Python>=3.5 GzipFile's read() has issues returning leftover\n# uncompressed data when input is corrupted\n# (regression or bug-fix compared to Python 3.4)\n# - read1(), which fetches data before raising EOFError on next call\n# works here but is only available from Python>=3.3\n# - scrapy does not support Python 3.2\n# - Python 2.7 GzipFile works fine with standard read() + extrabuf\nif six.PY2:\n def read1(gzf, size=-1):\n return gzf.read(size)\nelse:\n def read1(gzf, size=-1):\n return gzf.read1(size)\n\n\ndef gunzip(data):\n \"\"\"Gunzip the given data and return as much data as possible.\n\n This is resilient to CRC checksum errors.\n \"\"\"\n f = GzipFile(fileobj=BytesIO(data))\n output = b''\n chunk = b'.'\n while chunk:\n try:\n chunk = read1(f, 8196)\n output += chunk\n except (IOError, EOFError, struct.error):\n # complete only if there is some data, otherwise re-raise\n # see issue 87 about catching struct.error\n # some pages are quite small so output is '' and f.extrabuf\n # contains the whole page content\n if output or getattr(f, 'extrabuf', None):\n try:\n output += f.extrabuf\n finally:\n break\n else:\n raise\n return output\n\n_is_gzipped = re.compile(br'^application/(x-)?gzip\\b', re.I).search\n_is_octetstream = re.compile(br'^(application|binary)/octet-stream\\b', re.I).search\n\ndef is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n cenc = response.headers.get('Content-Encoding', b'').lower()\n return (_is_gzipped(ctype) or\n (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))\n", "path": "scrapy/utils/gz.py"}]}
| 1,743 | 233 |
gh_patches_debug_32401
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-1961
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tests create figure hashes directory
when running `py.tests` on a single file, it creates unneeded tmp directories.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/conftest.py`
Content:
```
1 from __future__ import absolute_import, print_function
2 from functools import partial
3
4 import os
5 import socket
6 import tempfile
7 import json
8
9 from sunpy.extern.six.moves.urllib.request import urlopen
10 from sunpy.extern.six.moves.urllib.error import URLError
11
12 import pytest
13
14
15 # Force MPL to use non-gui backends for testing.
16 try:
17 import matplotlib
18 except ImportError:
19 pass
20 else:
21 matplotlib.use('Agg')
22
23 from astropy.tests import disable_internet
24
25 from sunpy.tests import hash
26
27 hash_library_original_len = len(hash.hash_library)
28
29 GOOGLE_URL = 'http://www.google.com'
30
31
32 def site_reachable(url):
33 try:
34 urlopen(url, timeout=1)
35 except (URLError, socket.timeout):
36 return False
37 else:
38 return True
39
40
41 is_online = partial(site_reachable, GOOGLE_URL)
42
43
44 def pytest_runtest_setup(item):
45 """
46 pytest hook to skip all tests that have the mark 'online' if the
47 client is online (simply detected by checking whether http://www.google.com
48 can be requested).
49 """
50 if isinstance(item, item.Function):
51 if 'online' in item.keywords and not is_online():
52 msg = 'skipping test {0} (reason: client seems to be offline)'
53 pytest.skip(msg.format(item.name))
54
55 if 'online' not in item.keywords:
56 disable_internet.turn_off_internet()
57
58
59 def pytest_runtest_teardown(item, nextitem):
60 disable_internet.turn_on_internet()
61
62
63 def pytest_unconfigure(config):
64 tempdir = tempfile.mkdtemp(suffix="_figures")
65 # the hash_library is indexed by the name of the test but we want to look
66 # things up with the hash value
67 inv_hash_library = {v: k for k, v in hash.hash_library.items()}
68
69 for h in hash.file_list:
70 test_name = inv_hash_library.get(h, '')
71 if test_name != '':
72 os.rename(hash.file_list[h], os.path.join(tempdir,
73 test_name + '.png'))
74 print('All test files for figure hashes can be found in {0}'.format(
75 tempdir))
76
77 # Check if additions have been made to the hash library
78 if len(hash.hash_library) > hash_library_original_len:
79 # Write the new hash library in JSON
80 tempdir = tempfile.mkdtemp()
81 hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)
82 with open(hashfile, 'wb') as outfile:
83 json.dump(
84 hash.hash_library,
85 outfile,
86 sort_keys=True,
87 indent=4,
88 separators=(',', ': '))
89 print(
90 "The hash library has expanded and should be copied to sunpy/tests/")
91 print(" " + hashfile)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/conftest.py b/sunpy/conftest.py
--- a/sunpy/conftest.py
+++ b/sunpy/conftest.py
@@ -22,9 +22,8 @@
from astropy.tests import disable_internet
-from sunpy.tests import hash
-
-hash_library_original_len = len(hash.hash_library)
+from sunpy.tests.hash import HASH_LIBRARY_NAME
+from sunpy.tests.helpers import new_hash_library, figure_test_pngfiles
GOOGLE_URL = 'http://www.google.com'
@@ -61,31 +60,17 @@
def pytest_unconfigure(config):
- tempdir = tempfile.mkdtemp(suffix="_figures")
- # the hash_library is indexed by the name of the test but we want to look
- # things up with the hash value
- inv_hash_library = {v: k for k, v in hash.hash_library.items()}
-
- for h in hash.file_list:
- test_name = inv_hash_library.get(h, '')
- if test_name != '':
- os.rename(hash.file_list[h], os.path.join(tempdir,
- test_name + '.png'))
- print('All test files for figure hashes can be found in {0}'.format(
- tempdir))
-
- # Check if additions have been made to the hash library
- if len(hash.hash_library) > hash_library_original_len:
+ if len(figure_test_pngfiles) > 0:
+ tempdir = tempfile.mkdtemp(suffix="_figures")
+
+ # Rename each PNG with the name of the corresponding test
+ for test_name in figure_test_pngfiles:
+ os.rename(figure_test_pngfiles[test_name], os.path.join(tempdir, test_name + '.png'))
+
# Write the new hash library in JSON
- tempdir = tempfile.mkdtemp()
- hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)
- with open(hashfile, 'wb') as outfile:
- json.dump(
- hash.hash_library,
- outfile,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))
- print(
- "The hash library has expanded and should be copied to sunpy/tests/")
- print(" " + hashfile)
+ hashfile = os.path.join(tempdir, HASH_LIBRARY_NAME)
+ with open(hashfile, 'w') as outfile:
+ json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))
+
+ print('All test files for figure hashes can be found in {0}'.format(tempdir))
+ print("The corresponding hash library is {0}".format(hashfile))
|
{"golden_diff": "diff --git a/sunpy/conftest.py b/sunpy/conftest.py\n--- a/sunpy/conftest.py\n+++ b/sunpy/conftest.py\n@@ -22,9 +22,8 @@\n \n from astropy.tests import disable_internet\n \n-from sunpy.tests import hash\n-\n-hash_library_original_len = len(hash.hash_library)\n+from sunpy.tests.hash import HASH_LIBRARY_NAME\n+from sunpy.tests.helpers import new_hash_library, figure_test_pngfiles\n \n GOOGLE_URL = 'http://www.google.com'\n \n@@ -61,31 +60,17 @@\n \n \n def pytest_unconfigure(config):\n- tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n- # the hash_library is indexed by the name of the test but we want to look\n- # things up with the hash value\n- inv_hash_library = {v: k for k, v in hash.hash_library.items()}\n-\n- for h in hash.file_list:\n- test_name = inv_hash_library.get(h, '')\n- if test_name != '':\n- os.rename(hash.file_list[h], os.path.join(tempdir,\n- test_name + '.png'))\n- print('All test files for figure hashes can be found in {0}'.format(\n- tempdir))\n-\n- # Check if additions have been made to the hash library\n- if len(hash.hash_library) > hash_library_original_len:\n+ if len(figure_test_pngfiles) > 0:\n+ tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n+\n+ # Rename each PNG with the name of the corresponding test\n+ for test_name in figure_test_pngfiles:\n+ os.rename(figure_test_pngfiles[test_name], os.path.join(tempdir, test_name + '.png'))\n+\n # Write the new hash library in JSON\n- tempdir = tempfile.mkdtemp()\n- hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)\n- with open(hashfile, 'wb') as outfile:\n- json.dump(\n- hash.hash_library,\n- outfile,\n- sort_keys=True,\n- indent=4,\n- separators=(',', ': '))\n- print(\n- \"The hash library has expanded and should be copied to sunpy/tests/\")\n- print(\" \" + hashfile)\n+ hashfile = os.path.join(tempdir, HASH_LIBRARY_NAME)\n+ with open(hashfile, 'w') as outfile:\n+ json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n+\n+ print('All test files for figure hashes can be found in {0}'.format(tempdir))\n+ print(\"The corresponding hash library is {0}\".format(hashfile))\n", "issue": "tests create figure hashes directory\nwhen running `py.tests` on single file it creates unneeded tmp directories.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\nfrom functools import partial\n\nimport os\nimport socket\nimport tempfile\nimport json\n\nfrom sunpy.extern.six.moves.urllib.request import urlopen\nfrom sunpy.extern.six.moves.urllib.error import URLError\n\nimport pytest\n\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\nfrom astropy.tests import disable_internet\n\nfrom sunpy.tests import hash\n\nhash_library_original_len = len(hash.hash_library)\n\nGOOGLE_URL = 'http://www.google.com'\n\n\ndef site_reachable(url):\n try:\n urlopen(url, timeout=1)\n except (URLError, socket.timeout):\n return False\n else:\n return True\n\n\nis_online = partial(site_reachable, GOOGLE_URL)\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n client is online (simply detected by checking whether http://www.google.com\n can be requested).\n \"\"\"\n if isinstance(item, item.Function):\n if 'online' in item.keywords and not is_online():\n msg = 'skipping test {0} (reason: client seems to be offline)'\n pytest.skip(msg.format(item.name))\n\n if 'online' not in item.keywords:\n 
disable_internet.turn_off_internet()\n\n\ndef pytest_runtest_teardown(item, nextitem):\n disable_internet.turn_on_internet()\n\n\ndef pytest_unconfigure(config):\n tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n # the hash_library is indexed by the name of the test but we want to look\n # things up with the hash value\n inv_hash_library = {v: k for k, v in hash.hash_library.items()}\n\n for h in hash.file_list:\n test_name = inv_hash_library.get(h, '')\n if test_name != '':\n os.rename(hash.file_list[h], os.path.join(tempdir,\n test_name + '.png'))\n print('All test files for figure hashes can be found in {0}'.format(\n tempdir))\n\n # Check if additions have been made to the hash library\n if len(hash.hash_library) > hash_library_original_len:\n # Write the new hash library in JSON\n tempdir = tempfile.mkdtemp()\n hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)\n with open(hashfile, 'wb') as outfile:\n json.dump(\n hash.hash_library,\n outfile,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n print(\n \"The hash library has expanded and should be copied to sunpy/tests/\")\n print(\" \" + hashfile)\n", "path": "sunpy/conftest.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function\nfrom functools import partial\n\nimport os\nimport socket\nimport tempfile\nimport json\n\nfrom sunpy.extern.six.moves.urllib.request import urlopen\nfrom sunpy.extern.six.moves.urllib.error import URLError\n\nimport pytest\n\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\nfrom astropy.tests import disable_internet\n\nfrom sunpy.tests.hash import HASH_LIBRARY_NAME\nfrom sunpy.tests.helpers import new_hash_library, figure_test_pngfiles\n\nGOOGLE_URL = 'http://www.google.com'\n\n\ndef site_reachable(url):\n try:\n urlopen(url, timeout=1)\n except (URLError, socket.timeout):\n return False\n else:\n return True\n\n\nis_online = partial(site_reachable, GOOGLE_URL)\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n client is online (simply detected by checking whether http://www.google.com\n can be requested).\n \"\"\"\n if isinstance(item, item.Function):\n if 'online' in item.keywords and not is_online():\n msg = 'skipping test {0} (reason: client seems to be offline)'\n pytest.skip(msg.format(item.name))\n\n if 'online' not in item.keywords:\n disable_internet.turn_off_internet()\n\n\ndef pytest_runtest_teardown(item, nextitem):\n disable_internet.turn_on_internet()\n\n\ndef pytest_unconfigure(config):\n if len(figure_test_pngfiles) > 0:\n tempdir = tempfile.mkdtemp(suffix=\"_figures\")\n\n # Rename each PNG with the name of the corresponding test\n for test_name in figure_test_pngfiles:\n os.rename(figure_test_pngfiles[test_name], os.path.join(tempdir, test_name + '.png'))\n\n # Write the new hash library in JSON\n hashfile = os.path.join(tempdir, HASH_LIBRARY_NAME)\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n print('All test files for figure hashes can be found in {0}'.format(tempdir))\n print(\"The corresponding hash library is {0}\".format(hashfile))\n", "path": "sunpy/conftest.py"}]}
| 1,056 | 589 |
gh_patches_debug_8984
|
rasdani/github-patches
|
git_diff
|
gammapy__gammapy-2896
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Install fails when Cython is not already installed
**Gammapy version**
Every I tested
**Bug description**
The setup.py hard imports Cython, so setuptools has no chance to install it.
**Expected behavior**
Install cython if needed before attempting to import it
**To Reproduce**
```
$ python -m venv test_gammapy
$ . test_gammapy/bin/activate
$ pip install gammapy
```
**Other information**
There is a very simple fix: provide a pyproject.toml that requires cython for the build system.
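For illustration, a minimal `pyproject.toml` along those lines might look like this; the exact package list and pins are assumptions, though judging from `setup.py` the build at least needs setuptools, setuptools_scm, Cython and numpy:

```toml
[build-system]
# Assumed build requirements: setup.py uses setuptools_scm (use_scm_version),
# Cython (cythonize) and numpy (np.get_include()) at build time.
requires = ["setuptools>=30.3", "setuptools_scm", "wheel", "cython", "numpy"]
build-backend = "setuptools.build_meta"
```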
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import sys
3 import setuptools
4 from distutils.version import LooseVersion
5
6 if LooseVersion(setuptools.__version__) < "30.3":
7 sys.stderr.write("ERROR: setuptools 30.3 or later is required by gammapy\n")
8 sys.exit(1)
9
10 # TODO: check if setuptools_scm, numpy, ... are OK
11 # Exit with good error message telling people to install those first if not
12
13
14 from Cython.Build import cythonize
15 from distutils.extension import Extension
16 import numpy as np
17
18
19 def make_cython_extension(filename):
20 return Extension(
21 filename.strip(".pyx").replace("/", "."),
22 [filename],
23 include_dirs=[np.get_include()],
24 )
25
26
27 cython_files = [
28 "gammapy/stats/fit_statistics_cython.pyx",
29 ]
30
31 ext_modules = cythonize([make_cython_extension(_) for _ in cython_files])
32
33 setuptools.setup(use_scm_version=True, ext_modules=ext_modules)
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,16 +1,5 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
-import sys
import setuptools
-from distutils.version import LooseVersion
-
-if LooseVersion(setuptools.__version__) < "30.3":
- sys.stderr.write("ERROR: setuptools 30.3 or later is required by gammapy\n")
- sys.exit(1)
-
-# TODO: check if setuptools_scm, numpy, ... are OK
-# Exit with good error message telling people to install those first if not
-
-
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,16 +1,5 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n-import sys\n import setuptools\n-from distutils.version import LooseVersion\n-\n-if LooseVersion(setuptools.__version__) < \"30.3\":\n- sys.stderr.write(\"ERROR: setuptools 30.3 or later is required by gammapy\\n\")\n- sys.exit(1)\n-\n-# TODO: check if setuptools_scm, numpy, ... are OK\n-# Exit with good error message telling people to install those first if not\n-\n-\n from Cython.Build import cythonize\n from distutils.extension import Extension\n import numpy as np\n", "issue": "Install fails when Cython is not already installed\n**Gammapy version**\r\n\r\nEvery I tested\r\n\r\n**Bug description**\r\n\r\nThe setup.py hard imports Cython, so setuptools has no chance to install it.\r\n\r\n**Expected behavior**\r\n\r\nInstall cython if needed before attempting to import it\r\n\r\n**To Reproduce**\r\n\r\n\r\n```\r\n$ python -m venv test_gammapy\r\n$ . test_gammapy/bin/activate\r\n$ pip install gammapy\r\n```\r\n\r\n\r\n**Other information**\r\nThere is a very simple fix: provide a pyproject.toml that requires cython for the build system.\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport sys\nimport setuptools\nfrom distutils.version import LooseVersion\n\nif LooseVersion(setuptools.__version__) < \"30.3\":\n sys.stderr.write(\"ERROR: setuptools 30.3 or later is required by gammapy\\n\")\n sys.exit(1)\n\n# TODO: check if setuptools_scm, numpy, ... are OK\n# Exit with good error message telling people to install those first if not\n\n\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport numpy as np\n\n\ndef make_cython_extension(filename):\n return Extension(\n filename.strip(\".pyx\").replace(\"/\", \".\"),\n [filename],\n include_dirs=[np.get_include()],\n )\n\n\ncython_files = [\n \"gammapy/stats/fit_statistics_cython.pyx\",\n]\n\next_modules = cythonize([make_cython_extension(_) for _ in cython_files])\n\nsetuptools.setup(use_scm_version=True, ext_modules=ext_modules)\n", "path": "setup.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport setuptools\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport numpy as np\n\n\ndef make_cython_extension(filename):\n return Extension(\n filename.strip(\".pyx\").replace(\"/\", \".\"),\n [filename],\n include_dirs=[np.get_include()],\n )\n\n\ncython_files = [\n \"gammapy/stats/fit_statistics_cython.pyx\",\n]\n\next_modules = cythonize([make_cython_extension(_) for _ in cython_files])\n\nsetuptools.setup(use_scm_version=True, ext_modules=ext_modules)\n", "path": "setup.py"}]}
| 666 | 165 |
gh_patches_debug_7143
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-13499
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Circular import for warehouse.oidc
Surfaced via:
```shellsession
python -c "import warehouse.oidc"
```
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "workpath/warehouse/warehouse/oidc/__init__.py", line 13, in <module>
from warehouse.oidc.interfaces import IOIDCPublisherService
File "workpath/warehouse/warehouse/oidc/interfaces.py", line 19, in <module>
from warehouse.packaging.models import Project
File "workpath/warehouse/warehouse/packaging/__init__.py", line 17, in <module>
from warehouse.accounts.models import Email, User
File "workpath/warehouse/warehouse/accounts/__init__.py", line 22, in <module>
from warehouse.accounts.security_policy import (
File "workpath/warehouse/warehouse/accounts/security_policy.py", line 36, in <module>
from warehouse.utils.security_policy import AuthenticationMethod
File "workpath/warehouse/warehouse/utils/security_policy.py", line 21, in <module>
from warehouse.oidc.models import OIDCPublisher
File "workpath/warehouse/warehouse/oidc/models.py", line 24, in <module>
from warehouse.oidc.interfaces import SignedClaims
ImportError: cannot import name 'SignedClaims' from partially initialized module 'warehouse.oidc.interfaces' (most likely due to a circular import) (workpath/warehouse/warehouse/oidc/interfaces.py)
```
This isn't posing a problem in production (yet). I found it when I tried to filter out the warnings from `InsecureOIDCPublisherWarning`, but we should figure out how to avoid the circular import.
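Since `Project` is only needed for type annotations in `warehouse/oidc/interfaces.py`, one way to break the cycle is to defer that import to type-checking time; a minimal sketch:

```python
from __future__ import annotations

from typing import TYPE_CHECKING, Any, NewType

if TYPE_CHECKING:
    # Only evaluated by type checkers, so importing Project here no longer
    # pulls in warehouse.packaging (and the cycle back into warehouse.oidc)
    # at runtime.
    from warehouse.oidc.models import PendingOIDCPublisher
    from warehouse.packaging.models import Project

SignedClaims = NewType("SignedClaims", dict[str, Any])
```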
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/oidc/interfaces.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from __future__ import annotations
14
15 from typing import TYPE_CHECKING, Any, NewType
16
17 from zope.interface import Interface
18
19 from warehouse.packaging.models import Project
20 from warehouse.rate_limiting.interfaces import RateLimiterException
21
22 if TYPE_CHECKING:
23 from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover
24
25 SignedClaims = NewType("SignedClaims", dict[str, Any])
26
27
28 class IOIDCPublisherService(Interface):
29 def verify_jwt_signature(unverified_token: str):
30 """
31 Verify the given JWT's signature, returning its signed claims if
32 valid. If the signature is invalid, `None` is returned.
33
34 This method does **not** verify the claim set itself -- the API
35 consumer is responsible for evaluating the claim set.
36 """
37 pass
38
39 def find_publisher(signed_claims: SignedClaims, *, pending: bool = False):
40 """
41 Given a mapping of signed claims produced by `verify_jwt_signature`,
42 attempt to find and return either a `OIDCPublisher` or `PendingOIDCPublisher`
43 that matches them, depending on the value of `pending`.
44
45 If no publisher matches the claims, `None` is returned.
46 """
47 pass
48
49 def reify_pending_publisher(
50 pending_publisher: PendingOIDCPublisher, project: Project
51 ):
52 """
53 Reify the given pending `PendingOIDCPublisher` into an `OIDCPublisher`,
54 adding it to the given project (presumed newly created) in the process.
55
56 Returns the reified publisher.
57 """
58 pass
59
60
61 class TooManyOIDCRegistrations(RateLimiterException):
62 pass
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/oidc/interfaces.py b/warehouse/oidc/interfaces.py
--- a/warehouse/oidc/interfaces.py
+++ b/warehouse/oidc/interfaces.py
@@ -16,11 +16,11 @@
from zope.interface import Interface
-from warehouse.packaging.models import Project
from warehouse.rate_limiting.interfaces import RateLimiterException
if TYPE_CHECKING:
- from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover
+ from warehouse.oidc.models import PendingOIDCPublisher
+ from warehouse.packaging.models import Project
SignedClaims = NewType("SignedClaims", dict[str, Any])
|
{"golden_diff": "diff --git a/warehouse/oidc/interfaces.py b/warehouse/oidc/interfaces.py\n--- a/warehouse/oidc/interfaces.py\n+++ b/warehouse/oidc/interfaces.py\n@@ -16,11 +16,11 @@\n \n from zope.interface import Interface\n \n-from warehouse.packaging.models import Project\n from warehouse.rate_limiting.interfaces import RateLimiterException\n \n if TYPE_CHECKING:\n- from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover\n+ from warehouse.oidc.models import PendingOIDCPublisher\n+ from warehouse.packaging.models import Project\n \n SignedClaims = NewType(\"SignedClaims\", dict[str, Any])\n", "issue": "Circular import for warehouse.oidc\nSurfaced via:\r\n\r\n```shellsession\r\npython -c \"import warehouse.oidc\"\r\n```\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"workpath/warehouse/warehouse/oidc/__init__.py\", line 13, in <module>\r\n from warehouse.oidc.interfaces import IOIDCPublisherService\r\n File \"workpath/warehouse/warehouse/oidc/interfaces.py\", line 19, in <module>\r\n from warehouse.packaging.models import Project\r\n File \"workpath/warehouse/warehouse/packaging/__init__.py\", line 17, in <module>\r\n from warehouse.accounts.models import Email, User\r\n File \"workpath/warehouse/warehouse/accounts/__init__.py\", line 22, in <module>\r\n from warehouse.accounts.security_policy import (\r\n File \"workpath/warehouse/warehouse/accounts/security_policy.py\", line 36, in <module>\r\n from warehouse.utils.security_policy import AuthenticationMethod\r\n File \"workpath/warehouse/warehouse/utils/security_policy.py\", line 21, in <module>\r\n from warehouse.oidc.models import OIDCPublisher\r\n File \"workpath/warehouse/warehouse/oidc/models.py\", line 24, in <module>\r\n from warehouse.oidc.interfaces import SignedClaims\r\nImportError: cannot import name 'SignedClaims' from partially initialized module 'warehouse.oidc.interfaces' (most likely due to a circular import) (workpath/warehouse/warehouse/oidc/interfaces.py)\r\n```\r\n\r\nThis isn't posing a problem in production (yet). I found it when I tried to filter out the warnings from `InsecureOIDCPublisherWarning` - but we should figure out how to not have a circular import.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, NewType\n\nfrom zope.interface import Interface\n\nfrom warehouse.packaging.models import Project\nfrom warehouse.rate_limiting.interfaces import RateLimiterException\n\nif TYPE_CHECKING:\n from warehouse.oidc.models import PendingOIDCPublisher # pragma: no cover\n\nSignedClaims = NewType(\"SignedClaims\", dict[str, Any])\n\n\nclass IOIDCPublisherService(Interface):\n def verify_jwt_signature(unverified_token: str):\n \"\"\"\n Verify the given JWT's signature, returning its signed claims if\n valid. 
If the signature is invalid, `None` is returned.\n\n This method does **not** verify the claim set itself -- the API\n consumer is responsible for evaluating the claim set.\n \"\"\"\n pass\n\n def find_publisher(signed_claims: SignedClaims, *, pending: bool = False):\n \"\"\"\n Given a mapping of signed claims produced by `verify_jwt_signature`,\n attempt to find and return either a `OIDCPublisher` or `PendingOIDCPublisher`\n that matches them, depending on the value of `pending`.\n\n If no publisher matches the claims, `None` is returned.\n \"\"\"\n pass\n\n def reify_pending_publisher(\n pending_publisher: PendingOIDCPublisher, project: Project\n ):\n \"\"\"\n Reify the given pending `PendingOIDCPublisher` into an `OIDCPublisher`,\n adding it to the given project (presumed newly created) in the process.\n\n Returns the reified publisher.\n \"\"\"\n pass\n\n\nclass TooManyOIDCRegistrations(RateLimiterException):\n pass\n", "path": "warehouse/oidc/interfaces.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, NewType\n\nfrom zope.interface import Interface\n\nfrom warehouse.rate_limiting.interfaces import RateLimiterException\n\nif TYPE_CHECKING:\n from warehouse.oidc.models import PendingOIDCPublisher\n from warehouse.packaging.models import Project\n\nSignedClaims = NewType(\"SignedClaims\", dict[str, Any])\n\n\nclass IOIDCPublisherService(Interface):\n def verify_jwt_signature(unverified_token: str):\n \"\"\"\n Verify the given JWT's signature, returning its signed claims if\n valid. If the signature is invalid, `None` is returned.\n\n This method does **not** verify the claim set itself -- the API\n consumer is responsible for evaluating the claim set.\n \"\"\"\n pass\n\n def find_publisher(signed_claims: SignedClaims, *, pending: bool = False):\n \"\"\"\n Given a mapping of signed claims produced by `verify_jwt_signature`,\n attempt to find and return either a `OIDCPublisher` or `PendingOIDCPublisher`\n that matches them, depending on the value of `pending`.\n\n If no publisher matches the claims, `None` is returned.\n \"\"\"\n pass\n\n def reify_pending_publisher(\n pending_publisher: PendingOIDCPublisher, project: Project\n ):\n \"\"\"\n Reify the given pending `PendingOIDCPublisher` into an `OIDCPublisher`,\n adding it to the given project (presumed newly created) in the process.\n\n Returns the reified publisher.\n \"\"\"\n pass\n\n\nclass TooManyOIDCRegistrations(RateLimiterException):\n pass\n", "path": "warehouse/oidc/interfaces.py"}]}
| 1,237 | 145 |
gh_patches_debug_1780
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-4075
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support ruby2.7 runtime
AWS supports the following Ruby runtimes:
<!--StartFragment-->
Name | Identifier | SDK for Ruby | Operating system
-- | -- | -- | --
Ruby 2.7 | ruby2.7 | 3.0.1 | Amazon Linux 2
Ruby 2.5 | ruby2.5 | 3.0.1 | Amazon Linux
Currently, `localstack/lambda` only contains the `ruby2.5` tag. Will the 2.7 runtime be supported in the (near) future?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/awslambda/lambda_utils.py`
Content:
```
1 import os
2 from collections import defaultdict
3 from localstack.utils.common import to_str
4
5 # Lambda runtime constants
6 LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
7 LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
8 LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
9 LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
10 LAMBDA_RUNTIME_NODEJS = 'nodejs'
11 LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
12 LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
13 LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
14 LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
15 LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
16 LAMBDA_RUNTIME_NODEJS14X = 'nodejs14.x'
17 LAMBDA_RUNTIME_JAVA8 = 'java8'
18 LAMBDA_RUNTIME_JAVA11 = 'java11'
19 LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
20 LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
21 LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
22 LAMBDA_RUNTIME_GOLANG = 'go1.x'
23 LAMBDA_RUNTIME_RUBY = 'ruby'
24 LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
25 LAMBDA_RUNTIME_PROVIDED = 'provided'
26
27 # default handler and runtime
28 LAMBDA_DEFAULT_HANDLER = 'handler.handler'
29 LAMBDA_DEFAULT_RUNTIME = 'python3.8'
30 LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
31
32 # List of Dotnet Lambda runtime names
33 DOTNET_LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_DOTNETCORE31]
34
35
36 def multi_value_dict_for_list(elements):
37 temp_mv_dict = defaultdict(list)
38 for key in elements:
39 if isinstance(key, (list, tuple)):
40 key, value = key
41 else:
42 value = elements[key]
43 key = to_str(key)
44 temp_mv_dict[key].append(value)
45
46 return dict((k, tuple(v)) for k, v in temp_mv_dict.items())
47
48
49 def get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
50 if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):
51 return 'bootstrap'
52 delimiter = '.'
53 if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
54 file_ext = '.js'
55 elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
56 file_ext = ''
57 elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):
58 file_ext = '.dll'
59 delimiter = ':'
60 elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
61 file_ext = '.rb'
62 else:
63 handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)
64 file_ext = '.py'
65 return '%s%s' % (handler_name.split(delimiter)[0], file_ext)
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/services/awslambda/lambda_utils.py b/localstack/services/awslambda/lambda_utils.py
--- a/localstack/services/awslambda/lambda_utils.py
+++ b/localstack/services/awslambda/lambda_utils.py
@@ -22,6 +22,7 @@
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
+LAMBDA_RUNTIME_RUBY27 = 'ruby2.7'
LAMBDA_RUNTIME_PROVIDED = 'provided'
# default handler and runtime
|
{"golden_diff": "diff --git a/localstack/services/awslambda/lambda_utils.py b/localstack/services/awslambda/lambda_utils.py\n--- a/localstack/services/awslambda/lambda_utils.py\n+++ b/localstack/services/awslambda/lambda_utils.py\n@@ -22,6 +22,7 @@\n LAMBDA_RUNTIME_GOLANG = 'go1.x'\n LAMBDA_RUNTIME_RUBY = 'ruby'\n LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\n+LAMBDA_RUNTIME_RUBY27 = 'ruby2.7'\n LAMBDA_RUNTIME_PROVIDED = 'provided'\n \n # default handler and runtime\n", "issue": "Support ruby2.7 runtime\nAWS supports the following Ruby runtimes:\r\n\r\n<!--StartFragment-->\r\nName | Identifier | SDK for Ruby | Operating system\r\n-- | -- | -- | --\r\nRuby 2.7 | ruby2.7 | 3.0.1 | Amazon Linux 2\r\nRuby 2.5 | ruby2.5 | 3.0.1 | Amazon Linux\r\n\r\n<!--EndFragment-->\r\n\r\nCurrently, `localstack/lambda` only contains the `ruby2.5` tag. Will the 2.7 runtime be supported in the (near) future?\n", "before_files": [{"content": "import os\nfrom collections import defaultdict\nfrom localstack.utils.common import to_str\n\n# Lambda runtime constants\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_NODEJS14X = 'nodejs14.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\n# default handler and runtime\nLAMBDA_DEFAULT_HANDLER = 'handler.handler'\nLAMBDA_DEFAULT_RUNTIME = 'python3.8'\nLAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'\n\n# List of Dotnet Lambda runtime names\nDOTNET_LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_DOTNETCORE31]\n\n\ndef multi_value_dict_for_list(elements):\n temp_mv_dict = defaultdict(list)\n for key in elements:\n if isinstance(key, (list, tuple)):\n key, value = key\n else:\n value = elements[key]\n key = to_str(key)\n temp_mv_dict[key].append(value)\n\n return dict((k, tuple(v)) for k, v in temp_mv_dict.items())\n\n\ndef get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):\n if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):\n return 'bootstrap'\n delimiter = '.'\n if runtime.startswith(LAMBDA_RUNTIME_NODEJS):\n file_ext = '.js'\n elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):\n file_ext = ''\n elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):\n file_ext = '.dll'\n delimiter = ':'\n elif runtime.startswith(LAMBDA_RUNTIME_RUBY):\n file_ext = '.rb'\n else:\n handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)\n file_ext = '.py'\n return '%s%s' % (handler_name.split(delimiter)[0], file_ext)\n", "path": "localstack/services/awslambda/lambda_utils.py"}], "after_files": [{"content": "import os\nfrom collections import defaultdict\nfrom localstack.utils.common import to_str\n\n# Lambda runtime constants\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 
'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_NODEJS14X = 'nodejs14.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_RUBY27 = 'ruby2.7'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\n# default handler and runtime\nLAMBDA_DEFAULT_HANDLER = 'handler.handler'\nLAMBDA_DEFAULT_RUNTIME = 'python3.8'\nLAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'\n\n# List of Dotnet Lambda runtime names\nDOTNET_LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_DOTNETCORE31]\n\n\ndef multi_value_dict_for_list(elements):\n temp_mv_dict = defaultdict(list)\n for key in elements:\n if isinstance(key, (list, tuple)):\n key, value = key\n else:\n value = elements[key]\n key = to_str(key)\n temp_mv_dict[key].append(value)\n\n return dict((k, tuple(v)) for k, v in temp_mv_dict.items())\n\n\ndef get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):\n if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):\n return 'bootstrap'\n delimiter = '.'\n if runtime.startswith(LAMBDA_RUNTIME_NODEJS):\n file_ext = '.js'\n elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):\n file_ext = ''\n elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):\n file_ext = '.dll'\n delimiter = ':'\n elif runtime.startswith(LAMBDA_RUNTIME_RUBY):\n file_ext = '.rb'\n else:\n handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)\n file_ext = '.py'\n return '%s%s' % (handler_name.split(delimiter)[0], file_ext)\n", "path": "localstack/services/awslambda/lambda_utils.py"}]}
| 1,179 | 139 |
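Illustrative note on the localstack record above: because `get_handler_file_from_name` dispatches on `runtime.startswith(LAMBDA_RUNTIME_RUBY)`, the newly added `ruby2.7` identifier resolves to a `.rb` handler file exactly like `ruby2.5` does. Below is a minimal standalone sketch of that prefix dispatch; the constant names mirror the patched `lambda_utils.py`, but this is a simplified stand-in, not the real module.

```python
# Simplified stand-in for the prefix-based dispatch in lambda_utils.py.
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_RUBY27 = 'ruby2.7'  # identifier introduced by the patch


def ruby_handler_file(handler_name, runtime):
    # Any runtime identifier starting with 'ruby' maps to a .rb handler file,
    # so adding ruby2.7 requires no change to the lookup logic itself.
    if not runtime.startswith(LAMBDA_RUNTIME_RUBY):
        raise ValueError('not a ruby runtime: %s' % runtime)
    return '%s.rb' % handler_name.split('.')[0]


assert ruby_handler_file('handler.process', LAMBDA_RUNTIME_RUBY25) == 'handler.rb'
assert ruby_handler_file('handler.process', LAMBDA_RUNTIME_RUBY27) == 'handler.rb'
```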
gh_patches_debug_13003
|
rasdani/github-patches
|
git_diff
|
replicate__cog-362
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support importing model from subdirectories
This works:
```yaml
model: "predict.py:MyModel"
```
But this doesn't:
```yaml:
model: "mymodel/predict.py:MyModel"
```
It should.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/cog/predictor.py`
Content:
```
1 from abc import ABC, abstractmethod
2 import importlib
3 import os.path
4 from pathlib import Path
5 from typing import Dict, Any
6
7 import yaml
8
9 from .errors import ConfigDoesNotExist, PredictorNotSet
10
11
12 # TODO(andreas): handle directory input
13 # TODO(andreas): handle List[Dict[str, int]], etc.
14 # TODO(andreas): model-level documentation
15
16
17 class Predictor(ABC):
18 @abstractmethod
19 def setup(self):
20 pass
21
22 @abstractmethod
23 def predict(self, **kwargs):
24 pass
25
26 def get_type_signature(self):
27 """
28 Returns a dict describing the inputs of the model.
29 """
30 from .input import (
31 get_type_name,
32 UNSPECIFIED,
33 )
34
35 inputs = []
36 if hasattr(self.predict, "_inputs"):
37 input_specs = self.predict._inputs
38 for spec in input_specs:
39 arg: Dict[str, Any] = {
40 "name": spec.name,
41 "type": get_type_name(spec.type),
42 }
43 if spec.help:
44 arg["help"] = spec.help
45 if spec.default is not UNSPECIFIED:
46 arg["default"] = str(spec.default) # TODO: don't string this
47 if spec.min is not None:
48 arg["min"] = str(spec.min) # TODO: don't string this
49 if spec.max is not None:
50 arg["max"] = str(spec.max) # TODO: don't string this
51 if spec.options is not None:
52 arg["options"] = [str(o) for o in spec.options]
53 inputs.append(arg)
54 return {"inputs": inputs}
55
56
57 def run_prediction(predictor, inputs, cleanup_functions):
58 """
59 Run the predictor on the inputs, and append resulting paths
60 to cleanup functions for removal.
61 """
62 result = predictor.predict(**inputs)
63 if isinstance(result, Path):
64 cleanup_functions.append(result.unlink)
65 return result
66
67
68 def load_predictor():
69 # Assumes the working directory is /src
70 config_path = os.path.abspath("cog.yaml")
71 try:
72 with open(config_path) as fh:
73 config = yaml.safe_load(fh)
74 except FileNotFoundError:
75 raise ConfigDoesNotExist(
76 f"Could not find {config_path}",
77 )
78
79 if "predict" not in config:
80 raise PredictorNotSet(
81 "Can't run predictions: 'predict' option not found in cog.yaml"
82 )
83
84 # TODO: handle predict scripts in subdirectories
85 predict_string = config["predict"]
86 module_name, class_name = predict_string.split(".py:", 1)
87 module = importlib.import_module(module_name)
88 predictor_class = getattr(module, class_name)
89 return predictor_class()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/cog/predictor.py b/python/cog/predictor.py
--- a/python/cog/predictor.py
+++ b/python/cog/predictor.py
@@ -81,9 +81,11 @@
"Can't run predictions: 'predict' option not found in cog.yaml"
)
- # TODO: handle predict scripts in subdirectories
predict_string = config["predict"]
- module_name, class_name = predict_string.split(".py:", 1)
- module = importlib.import_module(module_name)
+ module_path, class_name = predict_string.split(":", 1)
+ module_name = os.path.basename(module_path).split(".py", 1)[0]
+ spec = importlib.util.spec_from_file_location(module_name, module_path)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
predictor_class = getattr(module, class_name)
return predictor_class()
|
{"golden_diff": "diff --git a/python/cog/predictor.py b/python/cog/predictor.py\n--- a/python/cog/predictor.py\n+++ b/python/cog/predictor.py\n@@ -81,9 +81,11 @@\n \"Can't run predictions: 'predict' option not found in cog.yaml\"\n )\n \n- # TODO: handle predict scripts in subdirectories\n predict_string = config[\"predict\"]\n- module_name, class_name = predict_string.split(\".py:\", 1)\n- module = importlib.import_module(module_name)\n+ module_path, class_name = predict_string.split(\":\", 1)\n+ module_name = os.path.basename(module_path).split(\".py\", 1)[0]\n+ spec = importlib.util.spec_from_file_location(module_name, module_path)\n+ module = importlib.util.module_from_spec(spec)\n+ spec.loader.exec_module(module)\n predictor_class = getattr(module, class_name)\n return predictor_class()\n", "issue": "Support importing model from subdirectories\nThis works:\r\n\r\n```yaml\r\nmodel: \"predict.py:MyModel\"\r\n```\r\n\r\nBut this doesn't:\r\n\r\n```yaml:\r\nmodel: \"mymodel/predict.py:MyModel\"\r\n```\r\n\r\nIt should.\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nimport importlib\nimport os.path\nfrom pathlib import Path\nfrom typing import Dict, Any\n\nimport yaml\n\nfrom .errors import ConfigDoesNotExist, PredictorNotSet\n\n\n# TODO(andreas): handle directory input\n# TODO(andreas): handle List[Dict[str, int]], etc.\n# TODO(andreas): model-level documentation\n\n\nclass Predictor(ABC):\n @abstractmethod\n def setup(self):\n pass\n\n @abstractmethod\n def predict(self, **kwargs):\n pass\n\n def get_type_signature(self):\n \"\"\"\n Returns a dict describing the inputs of the model.\n \"\"\"\n from .input import (\n get_type_name,\n UNSPECIFIED,\n )\n\n inputs = []\n if hasattr(self.predict, \"_inputs\"):\n input_specs = self.predict._inputs\n for spec in input_specs:\n arg: Dict[str, Any] = {\n \"name\": spec.name,\n \"type\": get_type_name(spec.type),\n }\n if spec.help:\n arg[\"help\"] = spec.help\n if spec.default is not UNSPECIFIED:\n arg[\"default\"] = str(spec.default) # TODO: don't string this\n if spec.min is not None:\n arg[\"min\"] = str(spec.min) # TODO: don't string this\n if spec.max is not None:\n arg[\"max\"] = str(spec.max) # TODO: don't string this\n if spec.options is not None:\n arg[\"options\"] = [str(o) for o in spec.options]\n inputs.append(arg)\n return {\"inputs\": inputs}\n\n\ndef run_prediction(predictor, inputs, cleanup_functions):\n \"\"\"\n Run the predictor on the inputs, and append resulting paths\n to cleanup functions for removal.\n \"\"\"\n result = predictor.predict(**inputs)\n if isinstance(result, Path):\n cleanup_functions.append(result.unlink)\n return result\n\n\ndef load_predictor():\n # Assumes the working directory is /src\n config_path = os.path.abspath(\"cog.yaml\")\n try:\n with open(config_path) as fh:\n config = yaml.safe_load(fh)\n except FileNotFoundError:\n raise ConfigDoesNotExist(\n f\"Could not find {config_path}\",\n )\n\n if \"predict\" not in config:\n raise PredictorNotSet(\n \"Can't run predictions: 'predict' option not found in cog.yaml\"\n )\n\n # TODO: handle predict scripts in subdirectories\n predict_string = config[\"predict\"]\n module_name, class_name = predict_string.split(\".py:\", 1)\n module = importlib.import_module(module_name)\n predictor_class = getattr(module, class_name)\n return predictor_class()\n", "path": "python/cog/predictor.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\nimport importlib\nimport os.path\nfrom pathlib import Path\nfrom typing import Dict, 
Any\n\nimport yaml\n\nfrom .errors import ConfigDoesNotExist, PredictorNotSet\n\n\n# TODO(andreas): handle directory input\n# TODO(andreas): handle List[Dict[str, int]], etc.\n# TODO(andreas): model-level documentation\n\n\nclass Predictor(ABC):\n @abstractmethod\n def setup(self):\n pass\n\n @abstractmethod\n def predict(self, **kwargs):\n pass\n\n def get_type_signature(self):\n \"\"\"\n Returns a dict describing the inputs of the model.\n \"\"\"\n from .input import (\n get_type_name,\n UNSPECIFIED,\n )\n\n inputs = []\n if hasattr(self.predict, \"_inputs\"):\n input_specs = self.predict._inputs\n for spec in input_specs:\n arg: Dict[str, Any] = {\n \"name\": spec.name,\n \"type\": get_type_name(spec.type),\n }\n if spec.help:\n arg[\"help\"] = spec.help\n if spec.default is not UNSPECIFIED:\n arg[\"default\"] = str(spec.default) # TODO: don't string this\n if spec.min is not None:\n arg[\"min\"] = str(spec.min) # TODO: don't string this\n if spec.max is not None:\n arg[\"max\"] = str(spec.max) # TODO: don't string this\n if spec.options is not None:\n arg[\"options\"] = [str(o) for o in spec.options]\n inputs.append(arg)\n return {\"inputs\": inputs}\n\n\ndef run_prediction(predictor, inputs, cleanup_functions):\n \"\"\"\n Run the predictor on the inputs, and append resulting paths\n to cleanup functions for removal.\n \"\"\"\n result = predictor.predict(**inputs)\n if isinstance(result, Path):\n cleanup_functions.append(result.unlink)\n return result\n\n\ndef load_predictor():\n # Assumes the working directory is /src\n config_path = os.path.abspath(\"cog.yaml\")\n try:\n with open(config_path) as fh:\n config = yaml.safe_load(fh)\n except FileNotFoundError:\n raise ConfigDoesNotExist(\n f\"Could not find {config_path}\",\n )\n\n if \"predict\" not in config:\n raise PredictorNotSet(\n \"Can't run predictions: 'predict' option not found in cog.yaml\"\n )\n\n predict_string = config[\"predict\"]\n module_path, class_name = predict_string.split(\":\", 1)\n module_name = os.path.basename(module_path).split(\".py\", 1)[0]\n spec = importlib.util.spec_from_file_location(module_name, module_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n predictor_class = getattr(module, class_name)\n return predictor_class()\n", "path": "python/cog/predictor.py"}]}
| 1,060 | 209 |
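Illustrative note on the cog record above: `importlib.import_module` expects a dotted module name, so the original `split(".py:")` result `mymodel/predict` cannot be imported that way, which is why subdirectories failed. The patch loads the module from its file path instead. A minimal runnable sketch of that approach follows; the `mymodel/predict.py:MyModel` string is the hypothetical example from the issue, not a file that exists here.

```python
# Minimal sketch of loading "<path>.py:<ClassName>" by file location, the
# technique used in the patched load_predictor(); paths are illustrative.
import importlib.util
import os


def load_class_from_path(predict_string):
    module_path, class_name = predict_string.split(":", 1)
    # "mymodel/predict.py" -> "predict"; only the base name becomes the
    # module name, so directory separators in the path are fine.
    module_name = os.path.basename(module_path).split(".py", 1)[0]
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, class_name)


# Example usage (assumes mymodel/predict.py defining MyModel exists on disk):
# predictor_class = load_class_from_path("mymodel/predict.py:MyModel")
```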
gh_patches_debug_12895
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-4066
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/build_glossary_page.py`
Content:
```
1 import json
2
3
4 def build_glossary_tuples(source_json_path):
5 # Open the source_json_path
6 with open(source_json_path) as json_file:
7 # Read the contents as a json
8 data = json.load(json_file)
9 # Convert to list of tuples containing ("term", "definition", "url")
10 data_list = [(x['term'], x['definition']) for x in data.values()]
11 # Order list alphabetically by "term"
12 data_list.sort(key=lambda y: y[0])
13 # return the ordered list.
14 return data_list
15
16
17 def build_glossary_page(orderd_list_of_terms_tuples, glossary_file_path):
18 # Open the glossary page for editing
19 with open(glossary_file_path, "w") as glossary_file:
20 # Write the glossary page header
21 glossary_file.write("---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n")
22 # iterate the glossary list of tuples and write glossary entries.
23 for term, definition in orderd_list_of_terms_tuples:
24 glossary_file.write(f"**{term}:** {definition}\n\n")
25
26
27 def all_together_now(source_json_path, glossary_file_path):
28 list_of_terms_tuples = build_glossary_tuples(source_json_path)
29 build_glossary_page(list_of_terms_tuples, glossary_file_path)
30
31
32 if __name__ == '__main__':
33 all_together_now(
34 source_json_path="../docs/term_tags/terms.json",
35 glossary_file_path="../docs/glossary.md"
36 )
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/build_glossary_page.py b/scripts/build_glossary_page.py
--- a/scripts/build_glossary_page.py
+++ b/scripts/build_glossary_page.py
@@ -18,7 +18,7 @@
# Open the glossary page for editing
with open(glossary_file_path, "w") as glossary_file:
# Write the glossary page header
- glossary_file.write("---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n")
+ glossary_file.write("---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n\n")
# iterate the glossary list of tuples and write glossary entries.
for term, definition in orderd_list_of_terms_tuples:
glossary_file.write(f"**{term}:** {definition}\n\n")
|
{"golden_diff": "diff --git a/scripts/build_glossary_page.py b/scripts/build_glossary_page.py\n--- a/scripts/build_glossary_page.py\n+++ b/scripts/build_glossary_page.py\n@@ -18,7 +18,7 @@\n # Open the glossary page for editing\n with open(glossary_file_path, \"w\") as glossary_file:\n # Write the glossary page header\n- glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\")\n+ glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\\n\")\n # iterate the glossary list of tuples and write glossary entries.\n for term, definition in orderd_list_of_terms_tuples:\n glossary_file.write(f\"**{term}:** {definition}\\n\\n\")\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import json\n\n\ndef build_glossary_tuples(source_json_path):\n # Open the source_json_path\n with open(source_json_path) as json_file:\n # Read the contents as a json\n data = json.load(json_file)\n # Convert to list of tuples containing (\"term\", \"definition\", \"url\")\n data_list = [(x['term'], x['definition']) for x in data.values()]\n # Order list alphabetically by \"term\"\n data_list.sort(key=lambda y: y[0])\n # return the ordered list.\n return data_list\n\n\ndef build_glossary_page(orderd_list_of_terms_tuples, glossary_file_path):\n # Open the glossary page for editing\n with open(glossary_file_path, \"w\") as glossary_file:\n # Write the glossary page header\n glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\")\n # iterate the glossary list of tuples and write glossary entries.\n for term, definition in orderd_list_of_terms_tuples:\n glossary_file.write(f\"**{term}:** {definition}\\n\\n\")\n\n\ndef all_together_now(source_json_path, glossary_file_path):\n list_of_terms_tuples = build_glossary_tuples(source_json_path)\n build_glossary_page(list_of_terms_tuples, glossary_file_path)\n\n\nif __name__ == '__main__':\n all_together_now(\n source_json_path=\"../docs/term_tags/terms.json\",\n glossary_file_path=\"../docs/glossary.md\"\n )\n", "path": "scripts/build_glossary_page.py"}], "after_files": [{"content": "import json\n\n\ndef build_glossary_tuples(source_json_path):\n # Open the source_json_path\n with open(source_json_path) as json_file:\n # Read the contents as a json\n data = json.load(json_file)\n # Convert to list of tuples containing (\"term\", \"definition\", \"url\")\n data_list = [(x['term'], x['definition']) for x in data.values()]\n # Order list alphabetically by \"term\"\n data_list.sort(key=lambda y: y[0])\n # return the ordered list.\n return data_list\n\n\ndef build_glossary_page(orderd_list_of_terms_tuples, glossary_file_path):\n # Open the glossary page for editing\n with open(glossary_file_path, \"w\") as glossary_file:\n # Write the glossary page header\n glossary_file.write(\"---\\nid: glossary\\ntitle: \\\"Glossary of Terms\\\"\\n---\\n\\n\")\n # iterate the glossary list of tuples and write glossary entries.\n for term, definition in orderd_list_of_terms_tuples:\n glossary_file.write(f\"**{term}:** {definition}\\n\\n\")\n\n\ndef all_together_now(source_json_path, glossary_file_path):\n list_of_terms_tuples = build_glossary_tuples(source_json_path)\n build_glossary_page(list_of_terms_tuples, glossary_file_path)\n\n\nif __name__ == '__main__':\n all_together_now(\n source_json_path=\"../docs/term_tags/terms.json\",\n glossary_file_path=\"../docs/glossary.md\"\n )\n", "path": 
"scripts/build_glossary_page.py"}]}
| 696 | 191 |
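Illustrative note on the glossary record above: the only behavioural change is the extra `\n` after the front-matter header, which leaves a blank line between the closing `---` and the first glossary entry. A small self-contained check of that difference:

```python
# Header strings copied from before and after the patch; the entry line is a
# made-up glossary item used only to show where it lands in the output.
header_before = "---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n"
header_after = "---\nid: glossary\ntitle: \"Glossary of Terms\"\n---\n\n"
entry = "**Term:** definition\n\n"

# Without the extra newline the first entry starts directly after the closing
# '---'; with it there is a separating blank line.
assert (header_before + entry).splitlines()[4] == "**Term:** definition"
assert (header_after + entry).splitlines()[4] == ""
```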
gh_patches_debug_15247
|
rasdani/github-patches
|
git_diff
|
ycm-core__ycmd-397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LookupError: unknown encoding on Mac with Python 2
With latest `ycmd` I get the following error on Mac with Py2:
``` python
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 181, in <module>
Main()
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 151, in Main
options, hmac_secret = SetupOptions( args.options_file )
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 127, in SetupOptions
options = user_options_store.DefaultOptions()
File "/Users/peter/src/ycmd/ycmd/../ycmd/user_options_store.py", line 52, in DefaultOptions
with open( settings_path ) as f:
LookupError: unknown encoding:
Process ycmd-server exited abnormally with code 1
```
I have a python 2 installation from homebrew
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ycmd/user_options_store.py`
Content:
```
1 # Copyright (C) 2013 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import unicode_literals
19 from __future__ import print_function
20 from __future__ import division
21 from __future__ import absolute_import
22 from future import standard_library
23 standard_library.install_aliases()
24 from builtins import * # noqa
25
26 import json
27 import os
28 from frozendict import frozendict
29
30 _USER_OPTIONS = {}
31
32 def SetAll( new_options ):
33 global _USER_OPTIONS
34 _USER_OPTIONS = frozendict( new_options )
35
36
37 def GetAll():
38 return _USER_OPTIONS
39
40
41 def Value( key ):
42 return _USER_OPTIONS[ key ]
43
44
45 def LoadDefaults():
46 SetAll( DefaultOptions() )
47
48
49 def DefaultOptions():
50 settings_path = os.path.join(
51 os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )
52 with open( settings_path ) as f:
53 options = json.loads( f.read() )
54 options.pop( 'hmac_secret', None )
55 return options
56
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ycmd/user_options_store.py b/ycmd/user_options_store.py
--- a/ycmd/user_options_store.py
+++ b/ycmd/user_options_store.py
@@ -27,8 +27,11 @@
import os
from frozendict import frozendict
+from ycmd.utils import ReadFile
+
_USER_OPTIONS = {}
+
def SetAll( new_options ):
global _USER_OPTIONS
_USER_OPTIONS = frozendict( new_options )
@@ -49,8 +52,6 @@
def DefaultOptions():
settings_path = os.path.join(
os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )
- with open( settings_path ) as f:
- options = json.loads( f.read() )
- options.pop( 'hmac_secret', None )
- return options
-
+ options = json.loads( ReadFile( settings_path ) )
+ options.pop( 'hmac_secret', None )
+ return options
|
{"golden_diff": "diff --git a/ycmd/user_options_store.py b/ycmd/user_options_store.py\n--- a/ycmd/user_options_store.py\n+++ b/ycmd/user_options_store.py\n@@ -27,8 +27,11 @@\n import os\n from frozendict import frozendict\n \n+from ycmd.utils import ReadFile\n+\n _USER_OPTIONS = {}\n \n+\n def SetAll( new_options ):\n global _USER_OPTIONS\n _USER_OPTIONS = frozendict( new_options )\n@@ -49,8 +52,6 @@\n def DefaultOptions():\n settings_path = os.path.join(\n os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )\n- with open( settings_path ) as f:\n- options = json.loads( f.read() )\n- options.pop( 'hmac_secret', None )\n- return options\n-\n+ options = json.loads( ReadFile( settings_path ) )\n+ options.pop( 'hmac_secret', None )\n+ return options\n", "issue": "LookupError: unknown encoding on Mac with Python 2\nWith latest `ycmd` I get the following error on Mac with Py2:\n\n``` python\nTraceback (most recent call last):\n File \"/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 162, in _run_module_as_main\n \"__main__\", fname, loader, pkg_name)\n File \"/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 72, in _run_code\n exec code in run_globals\n File \"/Users/peter/src/ycmd/ycmd/__main__.py\", line 181, in <module>\n Main()\n File \"/Users/peter/src/ycmd/ycmd/__main__.py\", line 151, in Main\n options, hmac_secret = SetupOptions( args.options_file )\n File \"/Users/peter/src/ycmd/ycmd/__main__.py\", line 127, in SetupOptions\n options = user_options_store.DefaultOptions()\n File \"/Users/peter/src/ycmd/ycmd/../ycmd/user_options_store.py\", line 52, in DefaultOptions\n with open( settings_path ) as f:\nLookupError: unknown encoding: \n\nProcess ycmd-server exited abnormally with code 1\n```\n\nI have a python 2 installation from homebrew\n\n", "before_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport json\nimport os\nfrom frozendict import frozendict\n\n_USER_OPTIONS = {}\n\ndef SetAll( new_options ):\n global _USER_OPTIONS\n _USER_OPTIONS = frozendict( new_options )\n\n\ndef GetAll():\n return _USER_OPTIONS\n\n\ndef Value( key ):\n return _USER_OPTIONS[ key ]\n\n\ndef LoadDefaults():\n SetAll( DefaultOptions() )\n\n\ndef DefaultOptions():\n settings_path = os.path.join(\n os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )\n with open( settings_path ) as f:\n options = json.loads( f.read() )\n options.pop( 'hmac_secret', None )\n return options\n\n", "path": "ycmd/user_options_store.py"}], "after_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport json\nimport os\nfrom frozendict import frozendict\n\nfrom ycmd.utils import ReadFile\n\n_USER_OPTIONS = {}\n\n\ndef SetAll( new_options ):\n global _USER_OPTIONS\n _USER_OPTIONS = frozendict( new_options )\n\n\ndef GetAll():\n return _USER_OPTIONS\n\n\ndef Value( key ):\n return _USER_OPTIONS[ key ]\n\n\ndef LoadDefaults():\n SetAll( DefaultOptions() )\n\n\ndef DefaultOptions():\n settings_path = os.path.join(\n os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )\n options = json.loads( ReadFile( settings_path ) )\n options.pop( 'hmac_secret', None )\n return options\n", "path": "ycmd/user_options_store.py"}]}
| 1,073 | 222 |
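Illustrative note on the ycmd record above: the failing `open(settings_path)` is the `builtins` backport pulled in by `from builtins import *`, which picks the text encoding from the locale, so an empty locale encoding surfaces as the `LookupError: unknown encoding:` in the traceback. The patch delegates to `ycmd.utils.ReadFile` instead. Below is a hedged stand-in for that helper which reads with an explicit encoding rather than the locale default; only the call shape is taken from the diff, the real `ReadFile` lives in `ycmd.utils`.

```python
# Stand-in reader with an explicit encoding, avoiding the locale lookup that
# failed in the issue; not the actual ycmd.utils.ReadFile implementation.
import io
import json


def read_file(path):
    # encoding='utf-8' sidesteps LookupError when the environment reports an
    # empty or unknown preferred encoding (the Python 2 failure reported).
    with io.open(path, mode='r', encoding='utf-8') as f:
        return f.read()


def default_options(settings_path):
    options = json.loads(read_file(settings_path))
    options.pop('hmac_secret', None)
    return options
```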
gh_patches_debug_46628
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-1028
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
1.2 Homepage object counts for DCIM all point to Rack model
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.9
* Nautobot version: 1.2.0-alpha1
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a set of each DCIM object which is listed on the homepage, like devices, interfaces, etc., and exactly 1 Rack
2. Navigate to the home page
<!-- What did you expect to happen? -->
### Expected Behavior
The home page widgets for object counts for DCIM should list correct counts for each object type.
<!-- What happened instead? -->
### Observed Behavior
Other than sites, all other DCIM object counts show the number of racks (in this case, exactly 1).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/dcim/homepage.py`
Content:
```
1 from nautobot.dcim.models.power import PowerFeed, PowerPanel
2 from nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site
3 from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel
4
5
6 layout = (
7 HomePagePanel(
8 name="Organization",
9 weight=100,
10 items=(
11 HomePageItem(
12 name="Sites",
13 link="dcim:site_list",
14 model=Site,
15 description="Geographic location",
16 permissions=["dcim.view_site"],
17 weight=100,
18 ),
19 ),
20 ),
21 HomePagePanel(
22 name="DCIM",
23 weight=200,
24 items=(
25 HomePageItem(
26 name="Racks",
27 link="dcim:rack_list",
28 model=Rack,
29 description="Equipment racks, optionally organized by group",
30 permissions=["dcim.view_rack"],
31 weight=100,
32 ),
33 HomePageItem(
34 name="Device Types",
35 link="dcim:devicetype_list",
36 model=Rack,
37 description="Physical hardware models by manufacturer",
38 permissions=["dcim.view_devicetype"],
39 weight=200,
40 ),
41 HomePageItem(
42 name="Devices",
43 link="dcim:device_list",
44 model=Rack,
45 description="Rack-mounted network equipment, servers, and other devices",
46 permissions=["dcim.view_device"],
47 weight=300,
48 ),
49 HomePageItem(
50 name="Virtual Chassis",
51 link="dcim:virtualchassis_list",
52 model=Rack,
53 permissions=["dcim.view_virtualchassis"],
54 description="Represents a set of devices which share a common control plane",
55 weight=400,
56 ),
57 HomePageGroup(
58 name="Connections",
59 weight=500,
60 items=(
61 HomePageItem(
62 name="Cables",
63 link="dcim:cable_list",
64 model=Cable,
65 permissions=["dcim.view_cable"],
66 weight=100,
67 ),
68 HomePageItem(
69 name="Interfaces",
70 link="dcim:interface_connections_list",
71 model=Interface,
72 permissions=["dcim.view_interface"],
73 weight=200,
74 ),
75 HomePageItem(
76 name="Console",
77 link="dcim:console_connections_list",
78 model=ConsolePort,
79 permissions=["dcim.view_consoleport", "dcim.view_consoleserverport"],
80 weight=300,
81 ),
82 HomePageItem(
83 name="Power",
84 link="dcim:power_connections_list",
85 model=PowerOutlet,
86 permissions=["dcim.view_powerport", "dcim.view_poweroutlet"],
87 weight=400,
88 ),
89 ),
90 ),
91 ),
92 ),
93 HomePagePanel(
94 name="Power",
95 weight=300,
96 items=(
97 HomePageItem(
98 name="Power Feeds",
99 link="dcim:powerfeed_list",
100 model=PowerFeed,
101 description="Electrical circuits delivering power from panels",
102 permissions=["dcim.view_powerfeed"],
103 weight=100,
104 ),
105 HomePageItem(
106 name="Power Panels",
107 link="dcim:powerpanel_list",
108 model=PowerPanel,
109 description="Electrical panels receiving utility power",
110 permissions=["dcim.view_powerpanel"],
111 weight=200,
112 ),
113 ),
114 ),
115 )
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/dcim/homepage.py b/nautobot/dcim/homepage.py
--- a/nautobot/dcim/homepage.py
+++ b/nautobot/dcim/homepage.py
@@ -1,6 +1,5 @@
-from nautobot.dcim.models.power import PowerFeed, PowerPanel
-from nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site
from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel
+from nautobot.dcim import models
layout = (
@@ -11,7 +10,7 @@
HomePageItem(
name="Sites",
link="dcim:site_list",
- model=Site,
+ model=models.Site,
description="Geographic location",
permissions=["dcim.view_site"],
weight=100,
@@ -25,7 +24,7 @@
HomePageItem(
name="Racks",
link="dcim:rack_list",
- model=Rack,
+ model=models.Rack,
description="Equipment racks, optionally organized by group",
permissions=["dcim.view_rack"],
weight=100,
@@ -33,7 +32,7 @@
HomePageItem(
name="Device Types",
link="dcim:devicetype_list",
- model=Rack,
+ model=models.DeviceType,
description="Physical hardware models by manufacturer",
permissions=["dcim.view_devicetype"],
weight=200,
@@ -41,7 +40,7 @@
HomePageItem(
name="Devices",
link="dcim:device_list",
- model=Rack,
+ model=models.Device,
description="Rack-mounted network equipment, servers, and other devices",
permissions=["dcim.view_device"],
weight=300,
@@ -49,7 +48,7 @@
HomePageItem(
name="Virtual Chassis",
link="dcim:virtualchassis_list",
- model=Rack,
+ model=models.VirtualChassis,
permissions=["dcim.view_virtualchassis"],
description="Represents a set of devices which share a common control plane",
weight=400,
@@ -61,28 +60,28 @@
HomePageItem(
name="Cables",
link="dcim:cable_list",
- model=Cable,
+ model=models.Cable,
permissions=["dcim.view_cable"],
weight=100,
),
HomePageItem(
name="Interfaces",
link="dcim:interface_connections_list",
- model=Interface,
+ model=models.Interface,
permissions=["dcim.view_interface"],
weight=200,
),
HomePageItem(
name="Console",
link="dcim:console_connections_list",
- model=ConsolePort,
+ model=models.ConsolePort,
permissions=["dcim.view_consoleport", "dcim.view_consoleserverport"],
weight=300,
),
HomePageItem(
name="Power",
link="dcim:power_connections_list",
- model=PowerOutlet,
+ model=models.PowerOutlet,
permissions=["dcim.view_powerport", "dcim.view_poweroutlet"],
weight=400,
),
@@ -97,7 +96,7 @@
HomePageItem(
name="Power Feeds",
link="dcim:powerfeed_list",
- model=PowerFeed,
+ model=models.PowerFeed,
description="Electrical circuits delivering power from panels",
permissions=["dcim.view_powerfeed"],
weight=100,
@@ -105,7 +104,7 @@
HomePageItem(
name="Power Panels",
link="dcim:powerpanel_list",
- model=PowerPanel,
+ model=models.PowerPanel,
description="Electrical panels receiving utility power",
permissions=["dcim.view_powerpanel"],
weight=200,
|
{"golden_diff": "diff --git a/nautobot/dcim/homepage.py b/nautobot/dcim/homepage.py\n--- a/nautobot/dcim/homepage.py\n+++ b/nautobot/dcim/homepage.py\n@@ -1,6 +1,5 @@\n-from nautobot.dcim.models.power import PowerFeed, PowerPanel\n-from nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site\n from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel\n+from nautobot.dcim import models\n \n \n layout = (\n@@ -11,7 +10,7 @@\n HomePageItem(\n name=\"Sites\",\n link=\"dcim:site_list\",\n- model=Site,\n+ model=models.Site,\n description=\"Geographic location\",\n permissions=[\"dcim.view_site\"],\n weight=100,\n@@ -25,7 +24,7 @@\n HomePageItem(\n name=\"Racks\",\n link=\"dcim:rack_list\",\n- model=Rack,\n+ model=models.Rack,\n description=\"Equipment racks, optionally organized by group\",\n permissions=[\"dcim.view_rack\"],\n weight=100,\n@@ -33,7 +32,7 @@\n HomePageItem(\n name=\"Device Types\",\n link=\"dcim:devicetype_list\",\n- model=Rack,\n+ model=models.DeviceType,\n description=\"Physical hardware models by manufacturer\",\n permissions=[\"dcim.view_devicetype\"],\n weight=200,\n@@ -41,7 +40,7 @@\n HomePageItem(\n name=\"Devices\",\n link=\"dcim:device_list\",\n- model=Rack,\n+ model=models.Device,\n description=\"Rack-mounted network equipment, servers, and other devices\",\n permissions=[\"dcim.view_device\"],\n weight=300,\n@@ -49,7 +48,7 @@\n HomePageItem(\n name=\"Virtual Chassis\",\n link=\"dcim:virtualchassis_list\",\n- model=Rack,\n+ model=models.VirtualChassis,\n permissions=[\"dcim.view_virtualchassis\"],\n description=\"Represents a set of devices which share a common control plane\",\n weight=400,\n@@ -61,28 +60,28 @@\n HomePageItem(\n name=\"Cables\",\n link=\"dcim:cable_list\",\n- model=Cable,\n+ model=models.Cable,\n permissions=[\"dcim.view_cable\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Interfaces\",\n link=\"dcim:interface_connections_list\",\n- model=Interface,\n+ model=models.Interface,\n permissions=[\"dcim.view_interface\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Console\",\n link=\"dcim:console_connections_list\",\n- model=ConsolePort,\n+ model=models.ConsolePort,\n permissions=[\"dcim.view_consoleport\", \"dcim.view_consoleserverport\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Power\",\n link=\"dcim:power_connections_list\",\n- model=PowerOutlet,\n+ model=models.PowerOutlet,\n permissions=[\"dcim.view_powerport\", \"dcim.view_poweroutlet\"],\n weight=400,\n ),\n@@ -97,7 +96,7 @@\n HomePageItem(\n name=\"Power Feeds\",\n link=\"dcim:powerfeed_list\",\n- model=PowerFeed,\n+ model=models.PowerFeed,\n description=\"Electrical circuits delivering power from panels\",\n permissions=[\"dcim.view_powerfeed\"],\n weight=100,\n@@ -105,7 +104,7 @@\n HomePageItem(\n name=\"Power Panels\",\n link=\"dcim:powerpanel_list\",\n- model=PowerPanel,\n+ model=models.PowerPanel,\n description=\"Electrical panels receiving utility power\",\n permissions=[\"dcim.view_powerpanel\"],\n weight=200,\n", "issue": "1.2 Homepage object counts for DCIM all point to Rack model\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. 
Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.9\r\n* Nautobot version: 1.2.0-alpha1\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a set of each DCIM object which is listed on the homepage, like devices, interfaces, etc., and exactly 1 Rack\r\n2. Navigate to the home page\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nThe home page widgets for object counts for DCIM should list correct counts for each object type.\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nOther than sites, all other DCIM object counts show the number of racks (in this case, exactly 1).\n", "before_files": [{"content": "from nautobot.dcim.models.power import PowerFeed, PowerPanel\nfrom nautobot.dcim.models import Cable, ConsolePort, Interface, PowerOutlet, Rack, Site\nfrom nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel\n\n\nlayout = (\n HomePagePanel(\n name=\"Organization\",\n weight=100,\n items=(\n HomePageItem(\n name=\"Sites\",\n link=\"dcim:site_list\",\n model=Site,\n description=\"Geographic location\",\n permissions=[\"dcim.view_site\"],\n weight=100,\n ),\n ),\n ),\n HomePagePanel(\n name=\"DCIM\",\n weight=200,\n items=(\n HomePageItem(\n name=\"Racks\",\n link=\"dcim:rack_list\",\n model=Rack,\n description=\"Equipment racks, optionally organized by group\",\n permissions=[\"dcim.view_rack\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Device Types\",\n link=\"dcim:devicetype_list\",\n model=Rack,\n description=\"Physical hardware models by manufacturer\",\n permissions=[\"dcim.view_devicetype\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Devices\",\n link=\"dcim:device_list\",\n model=Rack,\n description=\"Rack-mounted network equipment, servers, and other devices\",\n permissions=[\"dcim.view_device\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Virtual Chassis\",\n link=\"dcim:virtualchassis_list\",\n model=Rack,\n permissions=[\"dcim.view_virtualchassis\"],\n description=\"Represents a set of devices which share a common control plane\",\n weight=400,\n ),\n HomePageGroup(\n name=\"Connections\",\n weight=500,\n items=(\n HomePageItem(\n name=\"Cables\",\n link=\"dcim:cable_list\",\n model=Cable,\n permissions=[\"dcim.view_cable\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Interfaces\",\n link=\"dcim:interface_connections_list\",\n model=Interface,\n permissions=[\"dcim.view_interface\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Console\",\n link=\"dcim:console_connections_list\",\n model=ConsolePort,\n permissions=[\"dcim.view_consoleport\", \"dcim.view_consoleserverport\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Power\",\n link=\"dcim:power_connections_list\",\n model=PowerOutlet,\n permissions=[\"dcim.view_powerport\", \"dcim.view_poweroutlet\"],\n weight=400,\n ),\n ),\n ),\n ),\n ),\n HomePagePanel(\n name=\"Power\",\n weight=300,\n items=(\n HomePageItem(\n name=\"Power Feeds\",\n link=\"dcim:powerfeed_list\",\n model=PowerFeed,\n 
description=\"Electrical circuits delivering power from panels\",\n permissions=[\"dcim.view_powerfeed\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Power Panels\",\n link=\"dcim:powerpanel_list\",\n model=PowerPanel,\n description=\"Electrical panels receiving utility power\",\n permissions=[\"dcim.view_powerpanel\"],\n weight=200,\n ),\n ),\n ),\n)\n", "path": "nautobot/dcim/homepage.py"}], "after_files": [{"content": "from nautobot.core.apps import HomePageGroup, HomePageItem, HomePagePanel\nfrom nautobot.dcim import models\n\n\nlayout = (\n HomePagePanel(\n name=\"Organization\",\n weight=100,\n items=(\n HomePageItem(\n name=\"Sites\",\n link=\"dcim:site_list\",\n model=models.Site,\n description=\"Geographic location\",\n permissions=[\"dcim.view_site\"],\n weight=100,\n ),\n ),\n ),\n HomePagePanel(\n name=\"DCIM\",\n weight=200,\n items=(\n HomePageItem(\n name=\"Racks\",\n link=\"dcim:rack_list\",\n model=models.Rack,\n description=\"Equipment racks, optionally organized by group\",\n permissions=[\"dcim.view_rack\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Device Types\",\n link=\"dcim:devicetype_list\",\n model=models.DeviceType,\n description=\"Physical hardware models by manufacturer\",\n permissions=[\"dcim.view_devicetype\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Devices\",\n link=\"dcim:device_list\",\n model=models.Device,\n description=\"Rack-mounted network equipment, servers, and other devices\",\n permissions=[\"dcim.view_device\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Virtual Chassis\",\n link=\"dcim:virtualchassis_list\",\n model=models.VirtualChassis,\n permissions=[\"dcim.view_virtualchassis\"],\n description=\"Represents a set of devices which share a common control plane\",\n weight=400,\n ),\n HomePageGroup(\n name=\"Connections\",\n weight=500,\n items=(\n HomePageItem(\n name=\"Cables\",\n link=\"dcim:cable_list\",\n model=models.Cable,\n permissions=[\"dcim.view_cable\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Interfaces\",\n link=\"dcim:interface_connections_list\",\n model=models.Interface,\n permissions=[\"dcim.view_interface\"],\n weight=200,\n ),\n HomePageItem(\n name=\"Console\",\n link=\"dcim:console_connections_list\",\n model=models.ConsolePort,\n permissions=[\"dcim.view_consoleport\", \"dcim.view_consoleserverport\"],\n weight=300,\n ),\n HomePageItem(\n name=\"Power\",\n link=\"dcim:power_connections_list\",\n model=models.PowerOutlet,\n permissions=[\"dcim.view_powerport\", \"dcim.view_poweroutlet\"],\n weight=400,\n ),\n ),\n ),\n ),\n ),\n HomePagePanel(\n name=\"Power\",\n weight=300,\n items=(\n HomePageItem(\n name=\"Power Feeds\",\n link=\"dcim:powerfeed_list\",\n model=models.PowerFeed,\n description=\"Electrical circuits delivering power from panels\",\n permissions=[\"dcim.view_powerfeed\"],\n weight=100,\n ),\n HomePageItem(\n name=\"Power Panels\",\n link=\"dcim:powerpanel_list\",\n model=models.PowerPanel,\n description=\"Electrical panels receiving utility power\",\n permissions=[\"dcim.view_powerpanel\"],\n weight=200,\n ),\n ),\n ),\n)\n", "path": "nautobot/dcim/homepage.py"}]}
| 1,586 | 860 |
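Illustrative note on the nautobot record above: the miswired `HomePageItem` entries were all constructed with `model=Rack`, so every widget's object count came from the same queryset. A self-contained toy version of that counting behaviour; the fake manager and model classes below are stand-ins for the Django originals.

```python
# Toy stand-ins showing why every panel item reported the Rack count.
class FakeManager:
    def __init__(self, n):
        self._n = n

    def count(self):
        return self._n


class Rack:
    objects = FakeManager(1)     # exactly 1 Rack, as in the reproduction steps


class Device:
    objects = FakeManager(25)    # some other object count


items_before = {"Racks": Rack, "Devices": Rack}    # copy-paste bug
items_after = {"Racks": Rack, "Devices": Device}   # patched mapping

assert [m.objects.count() for m in items_before.values()] == [1, 1]
assert [m.objects.count() for m in items_after.values()] == [1, 25]
```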
gh_patches_debug_33144
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-358
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instrument Starlette authentication
Starlette has a basic authentication framework: https://www.starlette.io/authentication/
We should probably copy what we do for Django here, tagging user ID's/ display names on traces.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/async_/starlette.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import wrapt
5 from starlette.background import BackgroundTask
6 from starlette.requests import Request
7
8 import scout_apm.core
9 from scout_apm.core.tracked_request import TrackedRequest
10 from scout_apm.core.web_requests import (
11 create_filtered_path,
12 ignore_path,
13 track_amazon_request_queue_time,
14 track_request_queue_time,
15 )
16
17
18 class ScoutMiddleware:
19 def __init__(self, app):
20 self.app = app
21 installed = scout_apm.core.install()
22 self._do_nothing = not installed
23 if installed:
24 install_background_instrumentation()
25
26 async def __call__(self, scope, receive, send):
27 if self._do_nothing or scope["type"] != "http":
28 await self.app(scope, receive, send)
29 return
30
31 request = Request(scope)
32 tracked_request = TrackedRequest.instance()
33 # Can't name controller until post-routing - see final clause
34 controller_span = tracked_request.start_span(operation="Controller/Unknown")
35
36 tracked_request.tag(
37 "path",
38 create_filtered_path(request.url.path, request.query_params.multi_items()),
39 )
40 if ignore_path(request.url.path):
41 tracked_request.tag("ignore_transaction", True)
42
43 user_ip = (
44 request.headers.get("x-forwarded-for", default="").split(",")[0]
45 or request.headers.get("client-ip", default="").split(",")[0]
46 or request.client.host
47 )
48 tracked_request.tag("user_ip", user_ip)
49
50 queue_time = request.headers.get(
51 "x-queue-start", default=""
52 ) or request.headers.get("x-request-start", default="")
53 tracked_queue_time = track_request_queue_time(queue_time, tracked_request)
54 if not tracked_queue_time:
55 amazon_queue_time = request.headers.get("x-amzn-trace-id", default="")
56 track_amazon_request_queue_time(amazon_queue_time, tracked_request)
57
58 def rename_controller_span_from_endpoint():
59 if "endpoint" in scope:
60 # Rename top span
61 endpoint = scope["endpoint"]
62 controller_span.operation = "Controller/{}.{}".format(
63 endpoint.__module__, endpoint.__qualname__
64 )
65 tracked_request.is_real_request = True
66
67 async def wrapped_send(data):
68 # Finish HTTP span when body finishes sending, not later (e.g.
69 # after background tasks)
70 if data.get("type", None) == "http.response.body" and not data.get(
71 "more_body", False
72 ):
73 rename_controller_span_from_endpoint()
74 tracked_request.stop_span()
75 return await send(data)
76
77 try:
78 await self.app(scope, receive, wrapped_send)
79 except Exception as exc:
80 tracked_request.tag("error", "true")
81 raise exc
82 finally:
83 if tracked_request.end_time is None:
84 rename_controller_span_from_endpoint()
85 tracked_request.stop_span()
86
87
88 background_instrumentation_installed = False
89
90
91 def install_background_instrumentation():
92 global background_instrumentation_installed
93 if background_instrumentation_installed:
94 return
95 background_instrumentation_installed = True
96
97 @wrapt.decorator
98 async def wrapped_background_call(wrapped, instance, args, kwargs):
99 tracked_request = TrackedRequest.instance()
100 tracked_request.is_real_request = True
101 tracked_request.start_span(
102 operation="Job/{}.{}".format(
103 instance.func.__module__, instance.func.__qualname__
104 )
105 )
106 try:
107 return await wrapped(*args, **kwargs)
108 finally:
109 tracked_request.stop_span()
110
111 BackgroundTask.__call__ = wrapped_background_call(BackgroundTask.__call__)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/scout_apm/async_/starlette.py b/src/scout_apm/async_/starlette.py
--- a/src/scout_apm/async_/starlette.py
+++ b/src/scout_apm/async_/starlette.py
@@ -55,7 +55,7 @@
amazon_queue_time = request.headers.get("x-amzn-trace-id", default="")
track_amazon_request_queue_time(amazon_queue_time, tracked_request)
- def rename_controller_span_from_endpoint():
+ def grab_extra_data():
if "endpoint" in scope:
# Rename top span
endpoint = scope["endpoint"]
@@ -64,13 +64,23 @@
)
tracked_request.is_real_request = True
+ # From AuthenticationMiddleware - bypass request.user because it
+ # throws AssertionError if 'user' is not in Scope, and we need a
+ # try/except already
+ try:
+ username = scope["user"].display_name
+ except (KeyError, AttributeError):
+ pass
+ else:
+ tracked_request.tag("username", username)
+
async def wrapped_send(data):
# Finish HTTP span when body finishes sending, not later (e.g.
# after background tasks)
if data.get("type", None) == "http.response.body" and not data.get(
"more_body", False
):
- rename_controller_span_from_endpoint()
+ grab_extra_data()
tracked_request.stop_span()
return await send(data)
@@ -81,7 +91,7 @@
raise exc
finally:
if tracked_request.end_time is None:
- rename_controller_span_from_endpoint()
+ grab_extra_data()
tracked_request.stop_span()
|
{"golden_diff": "diff --git a/src/scout_apm/async_/starlette.py b/src/scout_apm/async_/starlette.py\n--- a/src/scout_apm/async_/starlette.py\n+++ b/src/scout_apm/async_/starlette.py\n@@ -55,7 +55,7 @@\n amazon_queue_time = request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n \n- def rename_controller_span_from_endpoint():\n+ def grab_extra_data():\n if \"endpoint\" in scope:\n # Rename top span\n endpoint = scope[\"endpoint\"]\n@@ -64,13 +64,23 @@\n )\n tracked_request.is_real_request = True\n \n+ # From AuthenticationMiddleware - bypass request.user because it\n+ # throws AssertionError if 'user' is not in Scope, and we need a\n+ # try/except already\n+ try:\n+ username = scope[\"user\"].display_name\n+ except (KeyError, AttributeError):\n+ pass\n+ else:\n+ tracked_request.tag(\"username\", username)\n+\n async def wrapped_send(data):\n # Finish HTTP span when body finishes sending, not later (e.g.\n # after background tasks)\n if data.get(\"type\", None) == \"http.response.body\" and not data.get(\n \"more_body\", False\n ):\n- rename_controller_span_from_endpoint()\n+ grab_extra_data()\n tracked_request.stop_span()\n return await send(data)\n \n@@ -81,7 +91,7 @@\n raise exc\n finally:\n if tracked_request.end_time is None:\n- rename_controller_span_from_endpoint()\n+ grab_extra_data()\n tracked_request.stop_span()\n", "issue": "Instrument Starlette authentication\nStarlette has a basic authentication framework: https://www.starlette.io/authentication/\r\n\r\nWe should probably copy what we do for Django here, tagging user ID's/ display names on traces.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport wrapt\nfrom starlette.background import BackgroundTask\nfrom starlette.requests import Request\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\nclass ScoutMiddleware:\n def __init__(self, app):\n self.app = app\n installed = scout_apm.core.install()\n self._do_nothing = not installed\n if installed:\n install_background_instrumentation()\n\n async def __call__(self, scope, receive, send):\n if self._do_nothing or scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n request = Request(scope)\n tracked_request = TrackedRequest.instance()\n # Can't name controller until post-routing - see final clause\n controller_span = tracked_request.start_span(operation=\"Controller/Unknown\")\n\n tracked_request.tag(\n \"path\",\n create_filtered_path(request.url.path, request.query_params.multi_items()),\n )\n if ignore_path(request.url.path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.client.host\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n tracked_queue_time = track_request_queue_time(queue_time, tracked_request)\n if not tracked_queue_time:\n amazon_queue_time = request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n def 
rename_controller_span_from_endpoint():\n if \"endpoint\" in scope:\n # Rename top span\n endpoint = scope[\"endpoint\"]\n controller_span.operation = \"Controller/{}.{}\".format(\n endpoint.__module__, endpoint.__qualname__\n )\n tracked_request.is_real_request = True\n\n async def wrapped_send(data):\n # Finish HTTP span when body finishes sending, not later (e.g.\n # after background tasks)\n if data.get(\"type\", None) == \"http.response.body\" and not data.get(\n \"more_body\", False\n ):\n rename_controller_span_from_endpoint()\n tracked_request.stop_span()\n return await send(data)\n\n try:\n await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n tracked_request.tag(\"error\", \"true\")\n raise exc\n finally:\n if tracked_request.end_time is None:\n rename_controller_span_from_endpoint()\n tracked_request.stop_span()\n\n\nbackground_instrumentation_installed = False\n\n\ndef install_background_instrumentation():\n global background_instrumentation_installed\n if background_instrumentation_installed:\n return\n background_instrumentation_installed = True\n\n @wrapt.decorator\n async def wrapped_background_call(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n tracked_request.start_span(\n operation=\"Job/{}.{}\".format(\n instance.func.__module__, instance.func.__qualname__\n )\n )\n try:\n return await wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n BackgroundTask.__call__ = wrapped_background_call(BackgroundTask.__call__)\n", "path": "src/scout_apm/async_/starlette.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport wrapt\nfrom starlette.background import BackgroundTask\nfrom starlette.requests import Request\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\nclass ScoutMiddleware:\n def __init__(self, app):\n self.app = app\n installed = scout_apm.core.install()\n self._do_nothing = not installed\n if installed:\n install_background_instrumentation()\n\n async def __call__(self, scope, receive, send):\n if self._do_nothing or scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n request = Request(scope)\n tracked_request = TrackedRequest.instance()\n # Can't name controller until post-routing - see final clause\n controller_span = tracked_request.start_span(operation=\"Controller/Unknown\")\n\n tracked_request.tag(\n \"path\",\n create_filtered_path(request.url.path, request.query_params.multi_items()),\n )\n if ignore_path(request.url.path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.client.host\n )\n tracked_request.tag(\"user_ip\", user_ip)\n\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n tracked_queue_time = track_request_queue_time(queue_time, tracked_request)\n if not tracked_queue_time:\n amazon_queue_time = request.headers.get(\"x-amzn-trace-id\", default=\"\")\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n def grab_extra_data():\n if \"endpoint\" in scope:\n # Rename top 
span\n endpoint = scope[\"endpoint\"]\n controller_span.operation = \"Controller/{}.{}\".format(\n endpoint.__module__, endpoint.__qualname__\n )\n tracked_request.is_real_request = True\n\n # From AuthenticationMiddleware - bypass request.user because it\n # throws AssertionError if 'user' is not in Scope, and we need a\n # try/except already\n try:\n username = scope[\"user\"].display_name\n except (KeyError, AttributeError):\n pass\n else:\n tracked_request.tag(\"username\", username)\n\n async def wrapped_send(data):\n # Finish HTTP span when body finishes sending, not later (e.g.\n # after background tasks)\n if data.get(\"type\", None) == \"http.response.body\" and not data.get(\n \"more_body\", False\n ):\n grab_extra_data()\n tracked_request.stop_span()\n return await send(data)\n\n try:\n await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n tracked_request.tag(\"error\", \"true\")\n raise exc\n finally:\n if tracked_request.end_time is None:\n grab_extra_data()\n tracked_request.stop_span()\n\n\nbackground_instrumentation_installed = False\n\n\ndef install_background_instrumentation():\n global background_instrumentation_installed\n if background_instrumentation_installed:\n return\n background_instrumentation_installed = True\n\n @wrapt.decorator\n async def wrapped_background_call(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n tracked_request.start_span(\n operation=\"Job/{}.{}\".format(\n instance.func.__module__, instance.func.__qualname__\n )\n )\n try:\n return await wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n BackgroundTask.__call__ = wrapped_background_call(BackgroundTask.__call__)\n", "path": "src/scout_apm/async_/starlette.py"}]}
| 1,318 | 380 |
gh_patches_debug_25796
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3426
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Results framework loads very slowly for projects with lot of indicator dimensions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_dimension.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimension
9
10 from ..serializers import IndicatorDimensionSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimension.objects.all()
18 serializer_class = IndicatorDimensionSerializer
19 project_relation = 'indicator__result__project__'
20
```
Path: `akvo/rest/pagination.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import pagination
8 from rest_framework.response import Response
9
10
11 class LimitSizePageNumberPagination(pagination.PageNumberPagination):
12 page_size = 30
13 page_size_query_param = 'limit'
14 max_page_size = 100
15
16
17 class TastypieOffsetPagination(pagination.LimitOffsetPagination):
18
19 def get_paginated_response(self, data):
20 """ Emulate the old style Tastypie format if the URL contains /api/v1/
21 """
22 return Response({
23 'meta': {
24 'next': self.get_next_link(),
25 'previous': self.get_previous_link(),
26 'total_count': self.count,
27 'limit': self.limit,
28 'offset': self.offset,
29 },
30 'objects': data
31 })
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py
--- a/akvo/rest/pagination.py
+++ b/akvo/rest/pagination.py
@@ -8,12 +8,18 @@
from rest_framework.response import Response
-class LimitSizePageNumberPagination(pagination.PageNumberPagination):
+class StandardSizePageNumberPagination(pagination.PageNumberPagination):
page_size = 30
page_size_query_param = 'limit'
max_page_size = 100
+class LargeSizePageNumberPagination(pagination.PageNumberPagination):
+ page_size = 100
+ page_size_query_param = 'limit'
+ max_page_size = 1000
+
+
class TastypieOffsetPagination(pagination.LimitOffsetPagination):
def get_paginated_response(self, data):
diff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py
--- a/akvo/rest/views/indicator_dimension.py
+++ b/akvo/rest/views/indicator_dimension.py
@@ -6,6 +6,7 @@
from akvo.rsr.models import IndicatorDimension
+from akvo.rest.pagination import LargeSizePageNumberPagination
from ..serializers import IndicatorDimensionSerializer
from ..viewsets import PublicProjectViewSet
@@ -17,3 +18,4 @@
queryset = IndicatorDimension.objects.all()
serializer_class = IndicatorDimensionSerializer
project_relation = 'indicator__result__project__'
+ pagination_class = LargeSizePageNumberPagination
|
{"golden_diff": "diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py\n--- a/akvo/rest/pagination.py\n+++ b/akvo/rest/pagination.py\n@@ -8,12 +8,18 @@\n from rest_framework.response import Response\n \n \n-class LimitSizePageNumberPagination(pagination.PageNumberPagination):\n+class StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n \n \n+class LargeSizePageNumberPagination(pagination.PageNumberPagination):\n+ page_size = 100\n+ page_size_query_param = 'limit'\n+ max_page_size = 1000\n+\n+\n class TastypieOffsetPagination(pagination.LimitOffsetPagination):\n \n def get_paginated_response(self, data):\ndiff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py\n--- a/akvo/rest/views/indicator_dimension.py\n+++ b/akvo/rest/views/indicator_dimension.py\n@@ -6,6 +6,7 @@\n \n \n from akvo.rsr.models import IndicatorDimension\n+from akvo.rest.pagination import LargeSizePageNumberPagination\n \n from ..serializers import IndicatorDimensionSerializer\n from ..viewsets import PublicProjectViewSet\n@@ -17,3 +18,4 @@\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n+ pagination_class = LargeSizePageNumberPagination\n", "issue": "Results framework loads very slowly for projects with lot of indicator dimensions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimension\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass LimitSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import 
IndicatorDimension\nfrom akvo.rest.pagination import LargeSizePageNumberPagination\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n pagination_class = LargeSizePageNumberPagination\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass LargeSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 100\n page_size_query_param = 'limit'\n max_page_size = 1000\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}]}
| 752 | 336 |
gh_patches_debug_15993
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-4897
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more context to "Easy vs Secure" prompt
Right now you get this prompt for redirect
```
Please choose whether HTTPS access is required or optional.
-------------------------------------------------------------------------------
1: Easy - Allow both HTTP and HTTPS access to these sites
2: Secure - Make all requests redirect to secure HTTPS access
-------------------------------------------------------------------------------
```
One confusing thing: The title asks "required or optional," but those words don't appear in either option.
Also, we should provide more advice on how to choose. I.e.
```
1: Easy - Allow both HTTP and HTTPS access to these sites. Choose this if you haven't yet tested that your sites function properly when visited on HTTPS.
2: Secure - Make all requests redirect to secure HTTPS access. Choose this for new sites, or if you're confident your site works on HTTPS. You can undo this change by editing your web server's configuration.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot/display/enhancements.py`
Content:
```
1 """Certbot Enhancement Display"""
2 import logging
3
4 import zope.component
5
6 from certbot import errors
7 from certbot import interfaces
8 from certbot.display import util as display_util
9
10
11 logger = logging.getLogger(__name__)
12
13 # Define a helper function to avoid verbose code
14 util = zope.component.getUtility
15
16
17 def ask(enhancement):
18 """Display the enhancement to the user.
19
20 :param str enhancement: One of the
21 :class:`certbot.CONFIG.ENHANCEMENTS` enhancements
22
23 :returns: True if feature is desired, False otherwise
24 :rtype: bool
25
26 :raises .errors.Error: if the enhancement provided is not supported
27
28 """
29 try:
30 # Call the appropriate function based on the enhancement
31 return DISPATCH[enhancement]()
32 except KeyError:
33 logger.error("Unsupported enhancement given to ask(): %s", enhancement)
34 raise errors.Error("Unsupported Enhancement")
35
36
37 def redirect_by_default():
38 """Determines whether the user would like to redirect to HTTPS.
39
40 :returns: True if redirect is desired, False otherwise
41 :rtype: bool
42
43 """
44 choices = [
45 ("Easy", "Allow both HTTP and HTTPS access to these sites"),
46 ("Secure", "Make all requests redirect to secure HTTPS access"),
47 ]
48
49 code, selection = util(interfaces.IDisplay).menu(
50 "Please choose whether HTTPS access is required or optional.",
51 choices, default=0,
52 cli_flag="--redirect / --no-redirect", force_interactive=True)
53
54 if code != display_util.OK:
55 return False
56
57 return selection == 1
58
59
60 DISPATCH = {
61 "redirect": redirect_by_default
62 }
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot/display/enhancements.py b/certbot/display/enhancements.py
--- a/certbot/display/enhancements.py
+++ b/certbot/display/enhancements.py
@@ -42,12 +42,14 @@
"""
choices = [
- ("Easy", "Allow both HTTP and HTTPS access to these sites"),
- ("Secure", "Make all requests redirect to secure HTTPS access"),
+ ("No redirect", "Make no further changes to the webserver configuration."),
+ ("Redirect", "Make all requests redirect to secure HTTPS access. "
+ "Choose this for new sites, or if you're confident your site works on HTTPS. "
+ "You can undo this change by editing your web server's configuration."),
]
code, selection = util(interfaces.IDisplay).menu(
- "Please choose whether HTTPS access is required or optional.",
+ "Please choose whether or not to redirect HTTP traffic to HTTPS, removing HTTP access.",
choices, default=0,
cli_flag="--redirect / --no-redirect", force_interactive=True)
|
{"golden_diff": "diff --git a/certbot/display/enhancements.py b/certbot/display/enhancements.py\n--- a/certbot/display/enhancements.py\n+++ b/certbot/display/enhancements.py\n@@ -42,12 +42,14 @@\n \n \"\"\"\n choices = [\n- (\"Easy\", \"Allow both HTTP and HTTPS access to these sites\"),\n- (\"Secure\", \"Make all requests redirect to secure HTTPS access\"),\n+ (\"No redirect\", \"Make no further changes to the webserver configuration.\"),\n+ (\"Redirect\", \"Make all requests redirect to secure HTTPS access. \"\n+ \"Choose this for new sites, or if you're confident your site works on HTTPS. \"\n+ \"You can undo this change by editing your web server's configuration.\"),\n ]\n \n code, selection = util(interfaces.IDisplay).menu(\n- \"Please choose whether HTTPS access is required or optional.\",\n+ \"Please choose whether or not to redirect HTTP traffic to HTTPS, removing HTTP access.\",\n choices, default=0,\n cli_flag=\"--redirect / --no-redirect\", force_interactive=True)\n", "issue": "Add more context to \"Easy vs Secure\" prompt\nRight now you get this prompt for redirect\n\n```\nPlease choose whether HTTPS access is required or optional.\n-------------------------------------------------------------------------------\n1: Easy - Allow both HTTP and HTTPS access to these sites\n2: Secure - Make all requests redirect to secure HTTPS access\n-------------------------------------------------------------------------------\n```\n\nOne confusing thing: The title asks \"required or optional,\" but those words don't appear in either option.\n\nAlso, we should provide more advice on how to choose. I.e. \n\n```\n1: Easy - Allow both HTTP and HTTPS access to these sites. Choose this if you haven't yet tested that your sites function properly when visited on HTTPS.\n2: Secure - Make all requests redirect to secure HTTPS access. Choose this for new sites, or if you're confident your site works on HTTPS. 
You can undo this change by editing your web server's configuration.\n```\n\n", "before_files": [{"content": "\"\"\"Certbot Enhancement Display\"\"\"\nimport logging\n\nimport zope.component\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.display import util as display_util\n\n\nlogger = logging.getLogger(__name__)\n\n# Define a helper function to avoid verbose code\nutil = zope.component.getUtility\n\n\ndef ask(enhancement):\n \"\"\"Display the enhancement to the user.\n\n :param str enhancement: One of the\n :class:`certbot.CONFIG.ENHANCEMENTS` enhancements\n\n :returns: True if feature is desired, False otherwise\n :rtype: bool\n\n :raises .errors.Error: if the enhancement provided is not supported\n\n \"\"\"\n try:\n # Call the appropriate function based on the enhancement\n return DISPATCH[enhancement]()\n except KeyError:\n logger.error(\"Unsupported enhancement given to ask(): %s\", enhancement)\n raise errors.Error(\"Unsupported Enhancement\")\n\n\ndef redirect_by_default():\n \"\"\"Determines whether the user would like to redirect to HTTPS.\n\n :returns: True if redirect is desired, False otherwise\n :rtype: bool\n\n \"\"\"\n choices = [\n (\"Easy\", \"Allow both HTTP and HTTPS access to these sites\"),\n (\"Secure\", \"Make all requests redirect to secure HTTPS access\"),\n ]\n\n code, selection = util(interfaces.IDisplay).menu(\n \"Please choose whether HTTPS access is required or optional.\",\n choices, default=0,\n cli_flag=\"--redirect / --no-redirect\", force_interactive=True)\n\n if code != display_util.OK:\n return False\n\n return selection == 1\n\n\nDISPATCH = {\n \"redirect\": redirect_by_default\n}\n", "path": "certbot/display/enhancements.py"}], "after_files": [{"content": "\"\"\"Certbot Enhancement Display\"\"\"\nimport logging\n\nimport zope.component\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.display import util as display_util\n\n\nlogger = logging.getLogger(__name__)\n\n# Define a helper function to avoid verbose code\nutil = zope.component.getUtility\n\n\ndef ask(enhancement):\n \"\"\"Display the enhancement to the user.\n\n :param str enhancement: One of the\n :class:`certbot.CONFIG.ENHANCEMENTS` enhancements\n\n :returns: True if feature is desired, False otherwise\n :rtype: bool\n\n :raises .errors.Error: if the enhancement provided is not supported\n\n \"\"\"\n try:\n # Call the appropriate function based on the enhancement\n return DISPATCH[enhancement]()\n except KeyError:\n logger.error(\"Unsupported enhancement given to ask(): %s\", enhancement)\n raise errors.Error(\"Unsupported Enhancement\")\n\n\ndef redirect_by_default():\n \"\"\"Determines whether the user would like to redirect to HTTPS.\n\n :returns: True if redirect is desired, False otherwise\n :rtype: bool\n\n \"\"\"\n choices = [\n (\"No redirect\", \"Make no further changes to the webserver configuration.\"),\n (\"Redirect\", \"Make all requests redirect to secure HTTPS access. \"\n \"Choose this for new sites, or if you're confident your site works on HTTPS. 
\"\n \"You can undo this change by editing your web server's configuration.\"),\n ]\n\n code, selection = util(interfaces.IDisplay).menu(\n \"Please choose whether or not to redirect HTTP traffic to HTTPS, removing HTTP access.\",\n choices, default=0,\n cli_flag=\"--redirect / --no-redirect\", force_interactive=True)\n\n if code != display_util.OK:\n return False\n\n return selection == 1\n\n\nDISPATCH = {\n \"redirect\": redirect_by_default\n}\n", "path": "certbot/display/enhancements.py"}]}
| 918 | 237 |
gh_patches_debug_13250
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-6099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate Reppy support
https://github.com/scrapy/scrapy/issues/5226#issuecomment-897621867
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/robotstxt.py`
Content:
```
1 import logging
2 import sys
3 from abc import ABCMeta, abstractmethod
4
5 from scrapy.utils.python import to_unicode
6
7 logger = logging.getLogger(__name__)
8
9
10 def decode_robotstxt(robotstxt_body, spider, to_native_str_type=False):
11 try:
12 if to_native_str_type:
13 robotstxt_body = to_unicode(robotstxt_body)
14 else:
15 robotstxt_body = robotstxt_body.decode("utf-8")
16 except UnicodeDecodeError:
17 # If we found garbage or robots.txt in an encoding other than UTF-8, disregard it.
18 # Switch to 'allow all' state.
19 logger.warning(
20 "Failure while parsing robots.txt. File either contains garbage or "
21 "is in an encoding other than UTF-8, treating it as an empty file.",
22 exc_info=sys.exc_info(),
23 extra={"spider": spider},
24 )
25 robotstxt_body = ""
26 return robotstxt_body
27
28
29 class RobotParser(metaclass=ABCMeta):
30 @classmethod
31 @abstractmethod
32 def from_crawler(cls, crawler, robotstxt_body):
33 """Parse the content of a robots.txt_ file as bytes. This must be a class method.
34 It must return a new instance of the parser backend.
35
36 :param crawler: crawler which made the request
37 :type crawler: :class:`~scrapy.crawler.Crawler` instance
38
39 :param robotstxt_body: content of a robots.txt_ file.
40 :type robotstxt_body: bytes
41 """
42 pass
43
44 @abstractmethod
45 def allowed(self, url, user_agent):
46 """Return ``True`` if ``user_agent`` is allowed to crawl ``url``, otherwise return ``False``.
47
48 :param url: Absolute URL
49 :type url: str
50
51 :param user_agent: User agent
52 :type user_agent: str
53 """
54 pass
55
56
57 class PythonRobotParser(RobotParser):
58 def __init__(self, robotstxt_body, spider):
59 from urllib.robotparser import RobotFileParser
60
61 self.spider = spider
62 robotstxt_body = decode_robotstxt(
63 robotstxt_body, spider, to_native_str_type=True
64 )
65 self.rp = RobotFileParser()
66 self.rp.parse(robotstxt_body.splitlines())
67
68 @classmethod
69 def from_crawler(cls, crawler, robotstxt_body):
70 spider = None if not crawler else crawler.spider
71 o = cls(robotstxt_body, spider)
72 return o
73
74 def allowed(self, url, user_agent):
75 user_agent = to_unicode(user_agent)
76 url = to_unicode(url)
77 return self.rp.can_fetch(user_agent, url)
78
79
80 class ReppyRobotParser(RobotParser):
81 def __init__(self, robotstxt_body, spider):
82 from reppy.robots import Robots
83
84 self.spider = spider
85 self.rp = Robots.parse("", robotstxt_body)
86
87 @classmethod
88 def from_crawler(cls, crawler, robotstxt_body):
89 spider = None if not crawler else crawler.spider
90 o = cls(robotstxt_body, spider)
91 return o
92
93 def allowed(self, url, user_agent):
94 return self.rp.allowed(url, user_agent)
95
96
97 class RerpRobotParser(RobotParser):
98 def __init__(self, robotstxt_body, spider):
99 from robotexclusionrulesparser import RobotExclusionRulesParser
100
101 self.spider = spider
102 self.rp = RobotExclusionRulesParser()
103 robotstxt_body = decode_robotstxt(robotstxt_body, spider)
104 self.rp.parse(robotstxt_body)
105
106 @classmethod
107 def from_crawler(cls, crawler, robotstxt_body):
108 spider = None if not crawler else crawler.spider
109 o = cls(robotstxt_body, spider)
110 return o
111
112 def allowed(self, url, user_agent):
113 user_agent = to_unicode(user_agent)
114 url = to_unicode(url)
115 return self.rp.is_allowed(user_agent, url)
116
117
118 class ProtegoRobotParser(RobotParser):
119 def __init__(self, robotstxt_body, spider):
120 from protego import Protego
121
122 self.spider = spider
123 robotstxt_body = decode_robotstxt(robotstxt_body, spider)
124 self.rp = Protego.parse(robotstxt_body)
125
126 @classmethod
127 def from_crawler(cls, crawler, robotstxt_body):
128 spider = None if not crawler else crawler.spider
129 o = cls(robotstxt_body, spider)
130 return o
131
132 def allowed(self, url, user_agent):
133 user_agent = to_unicode(user_agent)
134 url = to_unicode(url)
135 return self.rp.can_fetch(url, user_agent)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/robotstxt.py b/scrapy/robotstxt.py
--- a/scrapy/robotstxt.py
+++ b/scrapy/robotstxt.py
@@ -1,7 +1,9 @@
import logging
import sys
from abc import ABCMeta, abstractmethod
+from warnings import warn
+from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.python import to_unicode
logger = logging.getLogger(__name__)
@@ -79,6 +81,7 @@
class ReppyRobotParser(RobotParser):
def __init__(self, robotstxt_body, spider):
+ warn("ReppyRobotParser is deprecated.", ScrapyDeprecationWarning, stacklevel=2)
from reppy.robots import Robots
self.spider = spider
|
{"golden_diff": "diff --git a/scrapy/robotstxt.py b/scrapy/robotstxt.py\n--- a/scrapy/robotstxt.py\n+++ b/scrapy/robotstxt.py\n@@ -1,7 +1,9 @@\n import logging\n import sys\n from abc import ABCMeta, abstractmethod\n+from warnings import warn\n \n+from scrapy.exceptions import ScrapyDeprecationWarning\n from scrapy.utils.python import to_unicode\n \n logger = logging.getLogger(__name__)\n@@ -79,6 +81,7 @@\n \n class ReppyRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n+ warn(\"ReppyRobotParser is deprecated.\", ScrapyDeprecationWarning, stacklevel=2)\n from reppy.robots import Robots\n \n self.spider = spider\n", "issue": "Deprecate Reppy support\nhttps://github.com/scrapy/scrapy/issues/5226#issuecomment-897621867\n", "before_files": [{"content": "import logging\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom scrapy.utils.python import to_unicode\n\nlogger = logging.getLogger(__name__)\n\n\ndef decode_robotstxt(robotstxt_body, spider, to_native_str_type=False):\n try:\n if to_native_str_type:\n robotstxt_body = to_unicode(robotstxt_body)\n else:\n robotstxt_body = robotstxt_body.decode(\"utf-8\")\n except UnicodeDecodeError:\n # If we found garbage or robots.txt in an encoding other than UTF-8, disregard it.\n # Switch to 'allow all' state.\n logger.warning(\n \"Failure while parsing robots.txt. File either contains garbage or \"\n \"is in an encoding other than UTF-8, treating it as an empty file.\",\n exc_info=sys.exc_info(),\n extra={\"spider\": spider},\n )\n robotstxt_body = \"\"\n return robotstxt_body\n\n\nclass RobotParser(metaclass=ABCMeta):\n @classmethod\n @abstractmethod\n def from_crawler(cls, crawler, robotstxt_body):\n \"\"\"Parse the content of a robots.txt_ file as bytes. This must be a class method.\n It must return a new instance of the parser backend.\n\n :param crawler: crawler which made the request\n :type crawler: :class:`~scrapy.crawler.Crawler` instance\n\n :param robotstxt_body: content of a robots.txt_ file.\n :type robotstxt_body: bytes\n \"\"\"\n pass\n\n @abstractmethod\n def allowed(self, url, user_agent):\n \"\"\"Return ``True`` if ``user_agent`` is allowed to crawl ``url``, otherwise return ``False``.\n\n :param url: Absolute URL\n :type url: str\n\n :param user_agent: User agent\n :type user_agent: str\n \"\"\"\n pass\n\n\nclass PythonRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from urllib.robotparser import RobotFileParser\n\n self.spider = spider\n robotstxt_body = decode_robotstxt(\n robotstxt_body, spider, to_native_str_type=True\n )\n self.rp = RobotFileParser()\n self.rp.parse(robotstxt_body.splitlines())\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.can_fetch(user_agent, url)\n\n\nclass ReppyRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from reppy.robots import Robots\n\n self.spider = spider\n self.rp = Robots.parse(\"\", robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n return self.rp.allowed(url, user_agent)\n\n\nclass RerpRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from robotexclusionrulesparser import 
RobotExclusionRulesParser\n\n self.spider = spider\n self.rp = RobotExclusionRulesParser()\n robotstxt_body = decode_robotstxt(robotstxt_body, spider)\n self.rp.parse(robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.is_allowed(user_agent, url)\n\n\nclass ProtegoRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from protego import Protego\n\n self.spider = spider\n robotstxt_body = decode_robotstxt(robotstxt_body, spider)\n self.rp = Protego.parse(robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.can_fetch(url, user_agent)\n", "path": "scrapy/robotstxt.py"}], "after_files": [{"content": "import logging\nimport sys\nfrom abc import ABCMeta, abstractmethod\nfrom warnings import warn\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.python import to_unicode\n\nlogger = logging.getLogger(__name__)\n\n\ndef decode_robotstxt(robotstxt_body, spider, to_native_str_type=False):\n try:\n if to_native_str_type:\n robotstxt_body = to_unicode(robotstxt_body)\n else:\n robotstxt_body = robotstxt_body.decode(\"utf-8\")\n except UnicodeDecodeError:\n # If we found garbage or robots.txt in an encoding other than UTF-8, disregard it.\n # Switch to 'allow all' state.\n logger.warning(\n \"Failure while parsing robots.txt. File either contains garbage or \"\n \"is in an encoding other than UTF-8, treating it as an empty file.\",\n exc_info=sys.exc_info(),\n extra={\"spider\": spider},\n )\n robotstxt_body = \"\"\n return robotstxt_body\n\n\nclass RobotParser(metaclass=ABCMeta):\n @classmethod\n @abstractmethod\n def from_crawler(cls, crawler, robotstxt_body):\n \"\"\"Parse the content of a robots.txt_ file as bytes. 
This must be a class method.\n It must return a new instance of the parser backend.\n\n :param crawler: crawler which made the request\n :type crawler: :class:`~scrapy.crawler.Crawler` instance\n\n :param robotstxt_body: content of a robots.txt_ file.\n :type robotstxt_body: bytes\n \"\"\"\n pass\n\n @abstractmethod\n def allowed(self, url, user_agent):\n \"\"\"Return ``True`` if ``user_agent`` is allowed to crawl ``url``, otherwise return ``False``.\n\n :param url: Absolute URL\n :type url: str\n\n :param user_agent: User agent\n :type user_agent: str\n \"\"\"\n pass\n\n\nclass PythonRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from urllib.robotparser import RobotFileParser\n\n self.spider = spider\n robotstxt_body = decode_robotstxt(\n robotstxt_body, spider, to_native_str_type=True\n )\n self.rp = RobotFileParser()\n self.rp.parse(robotstxt_body.splitlines())\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.can_fetch(user_agent, url)\n\n\nclass ReppyRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n warn(\"ReppyRobotParser is deprecated.\", ScrapyDeprecationWarning, stacklevel=2)\n from reppy.robots import Robots\n\n self.spider = spider\n self.rp = Robots.parse(\"\", robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n return self.rp.allowed(url, user_agent)\n\n\nclass RerpRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from robotexclusionrulesparser import RobotExclusionRulesParser\n\n self.spider = spider\n self.rp = RobotExclusionRulesParser()\n robotstxt_body = decode_robotstxt(robotstxt_body, spider)\n self.rp.parse(robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.is_allowed(user_agent, url)\n\n\nclass ProtegoRobotParser(RobotParser):\n def __init__(self, robotstxt_body, spider):\n from protego import Protego\n\n self.spider = spider\n robotstxt_body = decode_robotstxt(robotstxt_body, spider)\n self.rp = Protego.parse(robotstxt_body)\n\n @classmethod\n def from_crawler(cls, crawler, robotstxt_body):\n spider = None if not crawler else crawler.spider\n o = cls(robotstxt_body, spider)\n return o\n\n def allowed(self, url, user_agent):\n user_agent = to_unicode(user_agent)\n url = to_unicode(url)\n return self.rp.can_fetch(url, user_agent)\n", "path": "scrapy/robotstxt.py"}]}
| 1,627 | 173 |
gh_patches_debug_1829
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-1125
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect logging level for `oauth2client.contrib.locked_file` (ref #427)
This is - as per request in the original #427 - a follow-up issue.
The function documentation says:
> Detects an appropriate cache module and returns it. Returns `googleapiclient.discovery_cache.base.Cache`, a cache object which is auto detected, or `None` if no cache object is available.
Exceptions in this context only provide a pythonic way to do control flow, and do not indicate anomalies or malfunctioning of the code. `None` is a perfectly fine, as-per-specifications behaviour. The correct level should probably be `INFO`.
Also, even if you would stand by the `WARNING` level, you should tidy up the messy log message. This is what the user is met with:
```WARNING:googleapiclient.discovery_cache:file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect
from google.appengine.api import memcache
ModuleNotFoundError: No module named 'google.appengine'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module>
from oauth2client.contrib.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module>
from oauth2client.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.locked_file'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 42, in autodetect
from . import file_cache
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 40, in <module>
raise ImportError(
ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
```
A saner way to to communicate with the user could potentially be:
`WARNING - No caching available (file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `googleapiclient/discovery_cache/__init__.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Caching utility for the discovery document."""
16
17 from __future__ import absolute_import
18
19 import logging
20 import datetime
21 import os
22
23 LOGGER = logging.getLogger(__name__)
24
25 DISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day
26
27
28 def autodetect():
29 """Detects an appropriate cache module and returns it.
30
31 Returns:
32 googleapiclient.discovery_cache.base.Cache, a cache object which
33 is auto detected, or None if no cache object is available.
34 """
35 if 'APPENGINE_RUNTIME' in os.environ:
36 try:
37 from google.appengine.api import memcache
38 from . import appengine_memcache
39
40 return appengine_memcache.cache
41 except Exception:
42 pass
43 try:
44 from . import file_cache
45
46 return file_cache.cache
47 except Exception as e:
48 LOGGER.warning(e, exc_info=True)
49 return None
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py
--- a/googleapiclient/discovery_cache/__init__.py
+++ b/googleapiclient/discovery_cache/__init__.py
@@ -44,6 +44,7 @@
from . import file_cache
return file_cache.cache
- except Exception as e:
- LOGGER.warning(e, exc_info=True)
+ except Exception:
+ LOGGER.info("file_cache is only supported with oauth2client<4.0.0",
+ exc_info=False)
return None
|
{"golden_diff": "diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py\n--- a/googleapiclient/discovery_cache/__init__.py\n+++ b/googleapiclient/discovery_cache/__init__.py\n@@ -44,6 +44,7 @@\n from . import file_cache\n \n return file_cache.cache\n- except Exception as e:\n- LOGGER.warning(e, exc_info=True)\n+ except Exception:\n+ LOGGER.info(\"file_cache is only supported with oauth2client<4.0.0\",\n+ exc_info=False)\n return None\n", "issue": "Incorrect logging level for `oauth2client.contrib.locked_file` (ref #427)\nThis is - as per request in the original #427 - a follow-up issue.\r\n\r\nThe function documentation says:\r\n\r\n> Detects an appropriate cache module and returns it. Returns `googleapiclient.discovery_cache.base.Cache`, a cache object which is auto detected, or `None` if no cache object is available.\r\n\r\nExceptions in this context only provide a pythonic way to do control flow, and do not indicate anomalies or malfunctioning of the code. `None` is a perfectly fine, as-per-specifications behaviour. The correct level should probably be `INFO`.\r\n\r\nAlso, even if you would stand by the `WARNING` level, you should tidy up the messy log message. This is what the user is met with:\r\n\r\n```WARNING:googleapiclient.discovery_cache:file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py\", line 36, in autodetect\r\n from google.appengine.api import memcache\r\nModuleNotFoundError: No module named 'google.appengine'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 33, in <module>\r\n from oauth2client.contrib.locked_file import LockedFile\r\nModuleNotFoundError: No module named 'oauth2client.contrib.locked_file'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 37, in <module>\r\n from oauth2client.locked_file import LockedFile\r\nModuleNotFoundError: No module named 'oauth2client.locked_file'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py\", line 42, in autodetect\r\n from . import file_cache\r\n File \"/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 40, in <module>\r\n raise ImportError(\r\nImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth\r\n```\r\n\r\nA saner way to to communicate with the user could potentially be:\r\n\r\n`WARNING - No caching available (file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth)`\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Caching utility for the discovery document.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport datetime\nimport os\n\nLOGGER = logging.getLogger(__name__)\n\nDISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day\n\n\ndef autodetect():\n \"\"\"Detects an appropriate cache module and returns it.\n\n Returns:\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n if 'APPENGINE_RUNTIME' in os.environ:\n try:\n from google.appengine.api import memcache\n from . import appengine_memcache\n\n return appengine_memcache.cache\n except Exception:\n pass\n try:\n from . import file_cache\n\n return file_cache.cache\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n return None\n", "path": "googleapiclient/discovery_cache/__init__.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Caching utility for the discovery document.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport datetime\nimport os\n\nLOGGER = logging.getLogger(__name__)\n\nDISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day\n\n\ndef autodetect():\n \"\"\"Detects an appropriate cache module and returns it.\n\n Returns:\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n if 'APPENGINE_RUNTIME' in os.environ:\n try:\n from google.appengine.api import memcache\n from . import appengine_memcache\n\n return appengine_memcache.cache\n except Exception:\n pass\n try:\n from . import file_cache\n\n return file_cache.cache\n except Exception:\n LOGGER.info(\"file_cache is only supported with oauth2client<4.0.0\",\n exc_info=False)\n return None\n", "path": "googleapiclient/discovery_cache/__init__.py"}]}
| 1,313 | 134 |
gh_patches_debug_6291
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-16484
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bitwise_and
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/logic.py`
Content:
```
1 # global
2 import ivy
3 import ivy.functional.frontends.paddle as paddle
4 from ivy.func_wrapper import (
5 with_unsupported_dtypes,
6 handle_out_argument,
7 with_supported_dtypes,
8 )
9 from ivy.functional.frontends.paddle.func_wrapper import (
10 to_ivy_arrays_and_back,
11 )
12
13
14 @with_unsupported_dtypes(
15 {"2.4.2 and below": ("uint8", "int8", "int16", "complex64", "complex128")}, "paddle"
16 )
17 @to_ivy_arrays_and_back
18 def equal(x, y, /, *, name=None):
19 return ivy.equal(x, y)
20
21
22 @with_unsupported_dtypes(
23 {"2.4.2 and below": ("uint8", "int8", "int16", "complex64", "complex128")}, "paddle"
24 )
25 @to_ivy_arrays_and_back
26 def not_equal(x, y, /, *, name=None):
27 return ivy.not_equal(x, y)
28
29
30 @with_unsupported_dtypes(
31 {
32 "2.4.2 and below": (
33 "uint8",
34 "int8",
35 "int16",
36 "float16",
37 "complex64",
38 "complex128",
39 )
40 },
41 "paddle",
42 )
43 @to_ivy_arrays_and_back
44 def equal_all(x, y, /, *, name=None):
45 return paddle.to_tensor([ivy.array_equal(x, y)])
46
47
48 @with_unsupported_dtypes(
49 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
50 "paddle",
51 )
52 @to_ivy_arrays_and_back
53 def greater_than(x, y, /, *, name=None):
54 return ivy.greater(x, y)
55
56
57 @with_unsupported_dtypes(
58 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
59 "paddle",
60 )
61 @to_ivy_arrays_and_back
62 def greater_equal(x, y, /, *, name=None):
63 return ivy.greater_equal(x, y)
64
65
66 @with_unsupported_dtypes(
67 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
68 "paddle",
69 )
70 @to_ivy_arrays_and_back
71 def less_than(x, y, /, *, name=None):
72 return ivy.less(x, y)
73
74
75 @with_unsupported_dtypes(
76 {"2.4.2 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
77 "paddle",
78 )
79 @to_ivy_arrays_and_back
80 def less_equal(x, y, /, *, name=None):
81 return ivy.less_equal(x, y)
82
83
84 @with_supported_dtypes(
85 {
86 "2.4.2 and below": (
87 "bool",
88 "int8",
89 "int16",
90 "int32",
91 "int64",
92 "float32",
93 "float64",
94 )
95 },
96 "paddle",
97 )
98 @to_ivy_arrays_and_back
99 @handle_out_argument
100 def logical_or(x, y, /, *, name=None, out=None):
101 return ivy.logical_or(x, y, out=out)
102
103
104 @with_supported_dtypes(
105 {
106 "2.4.2 and below": (
107 "bool",
108 "int8",
109 "int16",
110 "int32",
111 "int64",
112 "float32",
113 "float64",
114 )
115 },
116 "paddle",
117 )
118 @to_ivy_arrays_and_back
119 @handle_out_argument
120 def logical_xor(x, y, /, *, name=None, out=None):
121 return ivy.logical_xor(x, y, out=out)
122
123
124 @with_supported_dtypes(
125 {
126 "2.4.2 and below": (
127 "bool",
128 "int8",
129 "int16",
130 "int32",
131 "int64",
132 "float32",
133 "float64",
134 )
135 },
136 "paddle",
137 )
138 @to_ivy_arrays_and_back
139 @handle_out_argument
140 def logical_not(x, /, *, name=None, out=None):
141 return ivy.logical_not(x, out=out)
142
143
144 @with_supported_dtypes(
145 {
146 "2.4.2 and below": (
147 "bool",
148 "uint8",
149 "int8",
150 "int16",
151 "int32",
152 "int64",
153 )
154 },
155 "paddle",
156 )
157 @to_ivy_arrays_and_back
158 @handle_out_argument
159 def bitwise_or(x, y, name=None, out=None):
160 return ivy.bitwise_or(x, y, out=out)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/tensor/logic.py b/ivy/functional/frontends/paddle/tensor/logic.py
--- a/ivy/functional/frontends/paddle/tensor/logic.py
+++ b/ivy/functional/frontends/paddle/tensor/logic.py
@@ -158,3 +158,22 @@
@handle_out_argument
def bitwise_or(x, y, name=None, out=None):
return ivy.bitwise_or(x, y, out=out)
+
+
+@with_supported_dtypes(
+ {
+ "2.4.2 and below": (
+ "bool",
+ "uint8",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ )
+ },
+ "paddle",
+)
+@to_ivy_arrays_and_back
+@handle_out_argument
+def bitwise_and(x, y, /, *, name=None, out=None):
+ return ivy.bitwise_and(x, y, out=out)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/logic.py b/ivy/functional/frontends/paddle/tensor/logic.py\n--- a/ivy/functional/frontends/paddle/tensor/logic.py\n+++ b/ivy/functional/frontends/paddle/tensor/logic.py\n@@ -158,3 +158,22 @@\n @handle_out_argument\n def bitwise_or(x, y, name=None, out=None):\n return ivy.bitwise_or(x, y, out=out)\n+\n+\n+@with_supported_dtypes(\n+ {\n+ \"2.4.2 and below\": (\n+ \"bool\",\n+ \"uint8\",\n+ \"int8\",\n+ \"int16\",\n+ \"int32\",\n+ \"int64\",\n+ )\n+ },\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+@handle_out_argument\n+def bitwise_and(x, y, /, *, name=None, out=None):\n+ return ivy.bitwise_and(x, y, out=out)\n", "issue": "bitwise_and\n\n", "before_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.paddle as paddle\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n handle_out_argument,\n with_supported_dtypes,\n)\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef equal(x, y, /, *, name=None):\n return ivy.equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef not_equal(x, y, /, *, name=None):\n return ivy.not_equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\n \"2.4.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef equal_all(x, y, /, *, name=None):\n return paddle.to_tensor([ivy.array_equal(x, y)])\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef greater_than(x, y, /, *, name=None):\n return ivy.greater(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef greater_equal(x, y, /, *, name=None):\n return ivy.greater_equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef less_than(x, y, /, *, name=None):\n return ivy.less(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef less_equal(x, y, /, *, name=None):\n return ivy.less_equal(x, y)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_or(x, y, /, *, name=None, out=None):\n return ivy.logical_or(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_xor(x, y, /, *, name=None, out=None):\n return ivy.logical_xor(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n 
\"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_not(x, /, *, name=None, out=None):\n return ivy.logical_not(x, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef bitwise_or(x, y, name=None, out=None):\n return ivy.bitwise_or(x, y, out=out)\n", "path": "ivy/functional/frontends/paddle/tensor/logic.py"}], "after_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.paddle as paddle\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n handle_out_argument,\n with_supported_dtypes,\n)\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef equal(x, y, /, *, name=None):\n return ivy.equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef not_equal(x, y, /, *, name=None):\n return ivy.not_equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\n \"2.4.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef equal_all(x, y, /, *, name=None):\n return paddle.to_tensor([ivy.array_equal(x, y)])\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef greater_than(x, y, /, *, name=None):\n return ivy.greater(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef greater_equal(x, y, /, *, name=None):\n return ivy.greater_equal(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef less_than(x, y, /, *, name=None):\n return ivy.less(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef less_equal(x, y, /, *, name=None):\n return ivy.less_equal(x, y)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_or(x, y, /, *, name=None, out=None):\n return ivy.logical_or(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_xor(x, y, /, *, name=None, out=None):\n return ivy.logical_xor(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef logical_not(x, /, *, name=None, out=None):\n return ivy.logical_not(x, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n 
\"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef bitwise_or(x, y, name=None, out=None):\n return ivy.bitwise_or(x, y, out=out)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\n@handle_out_argument\ndef bitwise_and(x, y, /, *, name=None, out=None):\n return ivy.bitwise_and(x, y, out=out)\n", "path": "ivy/functional/frontends/paddle/tensor/logic.py"}]}
| 1,736 | 234 |
gh_patches_debug_4357
|
rasdani/github-patches
|
git_diff
|
OpenEnergyPlatform__oeplatform-1255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include the Open Energy Compendium in the navigation bar
## Description of the issue
A first version of the **Open Energy Compendium** is ready for integration.
## Ideas of solution
Link under `About` / `Compendium`
## Workflow checklist
- [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `oeplatform/settings.py`
Content:
```
1 """
2 Django settings for oeplatform project.
3
4 Generated by 'django-admin startproject' using Django 1.8.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.8/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.8/ref/settings/
11 """
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14
15 try:
16 from .securitysettings import * # noqa
17 except ImportError:
18 import logging
19 import os
20
21 logging.error("No securitysettings found. Triggerd in oeplatform/settings.py")
22 SECRET_KEY = os.environ.get("SECRET_KEY", "0")
23 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL")
24 URL = os.environ.get("URL")
25
26 # Quick-start development settings - unsuitable for production
27 # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
28
29 # Application definition
30
31 INSTALLED_APPS = (
32 "django.contrib.sites",
33 "django.contrib.admin",
34 "django.contrib.auth",
35 "django.contrib.contenttypes",
36 "django.contrib.sessions",
37 "django.contrib.messages",
38 "django.contrib.staticfiles",
39 "django.contrib.sessions.backends.signed_cookies",
40 "django_bootstrap5",
41 "rest_framework",
42 "rest_framework.authtoken",
43 "modelview",
44 "modelview.templatetags.modelview_extras",
45 "login",
46 "base",
47 "base.templatetags.base_tags",
48 "widget_tweaks",
49 "dataedit",
50 "colorfield",
51 "api",
52 "ontology",
53 "axes",
54 "captcha",
55 "django.contrib.postgres",
56 "fontawesome_5",
57 "django_better_admin_arrayfield",
58 "oeo_viewer",
59 )
60
61 MIDDLEWARE = (
62 "django.contrib.sites.middleware.CurrentSiteMiddleware",
63 "django.contrib.sessions.middleware.SessionMiddleware",
64 "django.middleware.common.CommonMiddleware",
65 "django.middleware.csrf.CsrfViewMiddleware",
66 "django.contrib.auth.middleware.AuthenticationMiddleware",
67 "django.contrib.messages.middleware.MessageMiddleware",
68 "django.middleware.clickjacking.XFrameOptionsMiddleware",
69 "django.middleware.security.SecurityMiddleware",
70 "login.middleware.DetachMiddleware",
71 "axes.middleware.AxesMiddleware",
72 "django.middleware.common.CommonMiddleware",
73 )
74
75 ROOT_URLCONF = "oeplatform.urls"
76
77 EXTERNAL_URLS = {
78 "tutorials_index": "https://openenergyplatform.github.io/academy/",
79 "tutorials_faq": "https://openenergyplatform.github.io/academy/",
80 "tutorials_api1": "https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/", # noqa E501
81 "tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/",
82 # noqa E501
83 "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
84 }
85
86
87 def external_urls_context_processor(request):
88 """Define hard coded external urls here.
89 Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}
90 Also, you may want to add an icon indicating external links, e.g.
91 """
92 return {"EXTERNAL_URLS": EXTERNAL_URLS}
93
94
95 SITE_ID = 1
96
97 TEMPLATES = [
98 {
99 "BACKEND": "django.template.backends.django.DjangoTemplates",
100 "DIRS": [],
101 "APP_DIRS": True,
102 "OPTIONS": {
103 "context_processors": [
104 "django.template.context_processors.debug",
105 "django.template.context_processors.request",
106 "django.contrib.auth.context_processors.auth",
107 "django.contrib.messages.context_processors.messages",
108 "oeplatform.settings.external_urls_context_processor",
109 ]
110 },
111 }
112 ]
113
114 CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"]
115
116 GRAPHENE = {"SCHEMA": "factsheet.schema.schema"}
117
118 WSGI_APPLICATION = "oeplatform.wsgi.application"
119
120 try:
121 ONTOLOGY_FOLDER # noqa
122 except NameError:
123 ONTOLOGY_FOLDER = "/tmp"
124
125 # Internationalization
126 # https://docs.djangoproject.com/en/1.8/topics/i18n/
127
128 LANGUAGE_CODE = "en-us"
129
130 TIME_ZONE = "Europe/Berlin"
131
132 USE_I18N = True
133
134 USE_L10N = True
135
136 USE_TZ = True
137
138 # Static files (CSS, JavaScript, Images)
139 # https://docs.djangoproject.com/en/1.8/howto/static-files/
140
141 AUTH_USER_MODEL = "login.myuser"
142 LOGIN_URL = "/user/login"
143 LOGIN_REDIRECT_URL = "/"
144
145 REST_FRAMEWORK = {
146 "DEFAULT_AUTHENTICATION_CLASSES": (
147 "rest_framework.authentication.BasicAuthentication",
148 "rest_framework.authentication.SessionAuthentication",
149 "rest_framework.authentication.TokenAuthentication",
150 )
151 }
152
153 AUTHENTICATION_BACKENDS = [
154 # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.
155 "axes.backends.AxesBackend",
156 # custom class extenging Django ModelBackend for login with username OR email
157 "login.backends.ModelBackendWithEmail",
158 ]
159
160 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/oeplatform/settings.py b/oeplatform/settings.py
--- a/oeplatform/settings.py
+++ b/oeplatform/settings.py
@@ -81,6 +81,7 @@
"tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/",
# noqa E501
"readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
+ "compendium": "https://openenergyplatform.github.io/organisation/",
}
|
{"golden_diff": "diff --git a/oeplatform/settings.py b/oeplatform/settings.py\n--- a/oeplatform/settings.py\n+++ b/oeplatform/settings.py\n@@ -81,6 +81,7 @@\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\",\n # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n+ \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n }\n", "issue": "Include the Open Energy Compendium in the navigation bar\n## Description of the issue\r\n\r\nA first version of the **Open Energy Compendium** is ready for integration.\r\n\r\n## Ideas of solution\r\n\r\nLink under `About` / `Compendium`\r\n\r\n## Workflow checklist\r\n- [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/\", # noqa E501\n \"tutorials_licenses\": 
\"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\",\n # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n", "path": "oeplatform/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. 
Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\",\n # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, 
JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n", "path": "oeplatform/settings.py"}]}
| 1,862 | 121 |
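The record above revolves around a hard-coded `EXTERNAL_URLS` mapping that a Django context processor exposes to every template. As a minimal sketch of that pattern, assuming a standard Django project and reusing the key names from the diff above (the sketch is illustrative, not part of the dataset record), the processor is just a plain function returning a dict:

```python
# Minimal sketch of the "hard-coded external URLs + context processor" pattern
# shown in the record above. The context processor is a plain function, so it
# can be exercised here without a running Django server.

EXTERNAL_URLS = {
    "tutorials_index": "https://openenergyplatform.github.io/academy/",
    "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
    # Entry added by the golden diff above:
    "compendium": "https://openenergyplatform.github.io/organisation/",
}


def external_urls_context_processor(request):
    """Expose EXTERNAL_URLS to templates, e.g. {{ EXTERNAL_URLS.compendium }}."""
    return {"EXTERNAL_URLS": EXTERNAL_URLS}


if __name__ == "__main__":
    # The request object is not used by this processor, so None stands in for it.
    context = external_urls_context_processor(None)
    print(context["EXTERNAL_URLS"]["compendium"])
```

In the record's settings.py the function is registered under TEMPLATES -> OPTIONS -> context_processors, which is what lets a navigation-bar template reference `{{ EXTERNAL_URLS.compendium }}` directly once the new key exists.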